From df7ff35ac4ca8bb328145ee51fddea9f87e577c3 Mon Sep 17 00:00:00 2001 From: Thomas Dehaeze Date: Mon, 20 Apr 2020 18:58:10 +0200 Subject: [PATCH] Add content folder --- content/book/_index.md | 8 + .../book/albertos04_multiv_contr_system.md | 20 + .../du10_model_contr_vibrat_mechan_system.md | 22 + .../book/du19_multi_actuat_system_contr.md | 659 ++ content/book/ewins00_modal.md | 4560 ++++++++++++ ...leming14_desig_model_contr_nanop_system.md | 21 + .../horowitz15_art_of_elect_third_edition.md | 20 + .../book/leach14_fundam_princ_engin_nanom.md | 93 + .../leach18_basic_precis_engin_edition.md | 20 + ..._vibrat_contr_activ_struc_fourt_edition.md | 1819 +++++ ..._desig_high_perfor_mechat_revis_edition.md | 37 + .../book/skogestad07_multiv_feedb_contr.md | 6269 +++++++++++++++++ content/book/taghirad13_paral.md | 2916 ++++++++ content/paper/_index.md | 8 + .../alkhatib03_activ_struc_vibrat_contr.md | 230 + content/paper/bibel92_guidel_h.md | 185 + .../butler11_posit_contr_lithog_equip.md | 20 + ...0_ident_decoup_contr_flexur_joint_hexap.md | 22 + ...ette11_review_activ_vibrat_isolat_strat.md | 79 + content/paper/collette14_vibrat.md | 104 + ...ette15_sensor_fusion_method_high_perfor.md | 28 + .../paper/dasgupta00_stewar_platf_manip.md | 37 + .../devasia07_survey_contr_issues_nanop.md | 30 + ...fleming10_nanop_system_with_force_feedb.md | 122 + content/paper/fleming12_estim.md | 22 + ...eming13_review_nanom_resol_posit_sensor.md | 185 + .../furqan17_studies_stewar_platf_manip.md | 22 + ...tani04_nanom_cuttin_machin_using_stewar.md | 38 + .../paper/gao15_measur_techn_precis_posit.md | 20 + .../paper/garg07_implem_chall_multiv_contr.md | 39 + ...geng95_intel_contr_system_multip_degree.md | 24 + content/paper/hanieh03_activ_stewar.md | 34 + .../hauge04_sensor_contr_space_based_six.md | 144 + .../paper/holler12_instr_x_ray_nano_imagin.md | 42 + ..._activ_dampin_based_decoup_colloc_contr.md | 20 + .../ito16_compar_class_high_precis_actuat.md | 71 + 
.../jiao18_dynam_model_exper_analy_stewar.md | 20 + .../legnani12_new_isotr_decoup_paral_manip.md | 34 + .../li01_simul_fault_vibrat_isolat_point.md | 233 + .../li01_simul_vibrat_isolat_point_contr.md | 23 + ...omen18_advan_motion_contr_precis_mechat.md | 20 + .../poel10_explor_activ_hard_mount_vibrat.md | 42 + ...eumont02_force_feedb_versus_accel_feedb.md | 49 + .../preumont07_six_axis_singl_stage_activ.md | 49 + .../saxena12_advan_inter_model_contr_techn.md | 88 + content/paper/schellekens98_desig_precis.md | 20 + .../sebastian12_nanop_with_multip_sensor.md | 20 + .../spanos95_soft_activ_vibrat_isolat.md | 62 + ...7_inter_charac_rotat_stages_x_ray_nanot.md | 33 + .../tang18_decen_vibrat_contr_voice_coil.md | 21 + ...fusion_activ_vibrat_isolat_precis_equip.md | 50 + .../wang12_autom_marker_full_field_hard.md | 29 + ...wang16_inves_activ_vibrat_isolat_stewar.md | 57 + .../yang19_dynam_model_decoup_contr_flexib.md | 137 + content/paper/zhang11_six_dof.md | 34 + content/websites/control_bootcamp.md | 126 + ...dynamical_systems_with_machine_learning.md | 183 + content/zettels/_index.md | 8 + content/zettels/active_damping.md | 16 + content/zettels/actuators.md | 72 + content/zettels/complementary_filters.md | 15 + content/zettels/cubic_architecture.md | 17 + content/zettels/electronics.md | 15 + content/zettels/flexible_joints.md | 21 + content/zettels/force_sensors.md | 23 + content/zettels/h_infinity_control.md | 24 + content/zettels/inertial_sensors.md | 47 + content/zettels/metrology.md | 15 + content/zettels/motion_control.md | 15 + content/zettels/multivariable_control.md | 19 + .../nano_active_stabilization_system.md | 17 + content/zettels/position_sensors.md | 115 + content/zettels/positioning_stations.md | 17 + content/zettels/precision_engineering.md | 16 + content/zettels/reference_books.md | 20 + content/zettels/sensor_fusion.md | 19 + content/zettels/stewart_platforms.md | 34 + content/zettels/system_identification.md | 15 + content/zettels/test.md | 103 
+ content/zettels/vibration_isolation.md | 33 + 80 files changed, 20016 insertions(+) create mode 100644 content/book/_index.md create mode 100644 content/book/albertos04_multiv_contr_system.md create mode 100644 content/book/du10_model_contr_vibrat_mechan_system.md create mode 100644 content/book/du19_multi_actuat_system_contr.md create mode 100644 content/book/ewins00_modal.md create mode 100644 content/book/fleming14_desig_model_contr_nanop_system.md create mode 100644 content/book/horowitz15_art_of_elect_third_edition.md create mode 100644 content/book/leach14_fundam_princ_engin_nanom.md create mode 100644 content/book/leach18_basic_precis_engin_edition.md create mode 100644 content/book/preumont18_vibrat_contr_activ_struc_fourt_edition.md create mode 100644 content/book/schmidt14_desig_high_perfor_mechat_revis_edition.md create mode 100644 content/book/skogestad07_multiv_feedb_contr.md create mode 100644 content/book/taghirad13_paral.md create mode 100644 content/paper/_index.md create mode 100644 content/paper/alkhatib03_activ_struc_vibrat_contr.md create mode 100644 content/paper/bibel92_guidel_h.md create mode 100644 content/paper/butler11_posit_contr_lithog_equip.md create mode 100644 content/paper/chen00_ident_decoup_contr_flexur_joint_hexap.md create mode 100644 content/paper/collette11_review_activ_vibrat_isolat_strat.md create mode 100644 content/paper/collette14_vibrat.md create mode 100644 content/paper/collette15_sensor_fusion_method_high_perfor.md create mode 100644 content/paper/dasgupta00_stewar_platf_manip.md create mode 100644 content/paper/devasia07_survey_contr_issues_nanop.md create mode 100644 content/paper/fleming10_nanop_system_with_force_feedb.md create mode 100644 content/paper/fleming12_estim.md create mode 100644 content/paper/fleming13_review_nanom_resol_posit_sensor.md create mode 100644 content/paper/furqan17_studies_stewar_platf_manip.md create mode 100644 content/paper/furutani04_nanom_cuttin_machin_using_stewar.md create mode 
100644 content/paper/gao15_measur_techn_precis_posit.md create mode 100644 content/paper/garg07_implem_chall_multiv_contr.md create mode 100644 content/paper/geng95_intel_contr_system_multip_degree.md create mode 100644 content/paper/hanieh03_activ_stewar.md create mode 100644 content/paper/hauge04_sensor_contr_space_based_six.md create mode 100644 content/paper/holler12_instr_x_ray_nano_imagin.md create mode 100644 content/paper/holterman05_activ_dampin_based_decoup_colloc_contr.md create mode 100644 content/paper/ito16_compar_class_high_precis_actuat.md create mode 100644 content/paper/jiao18_dynam_model_exper_analy_stewar.md create mode 100644 content/paper/legnani12_new_isotr_decoup_paral_manip.md create mode 100644 content/paper/li01_simul_fault_vibrat_isolat_point.md create mode 100644 content/paper/li01_simul_vibrat_isolat_point_contr.md create mode 100644 content/paper/oomen18_advan_motion_contr_precis_mechat.md create mode 100644 content/paper/poel10_explor_activ_hard_mount_vibrat.md create mode 100644 content/paper/preumont02_force_feedb_versus_accel_feedb.md create mode 100644 content/paper/preumont07_six_axis_singl_stage_activ.md create mode 100644 content/paper/saxena12_advan_inter_model_contr_techn.md create mode 100644 content/paper/schellekens98_desig_precis.md create mode 100644 content/paper/sebastian12_nanop_with_multip_sensor.md create mode 100644 content/paper/spanos95_soft_activ_vibrat_isolat.md create mode 100644 content/paper/stankevic17_inter_charac_rotat_stages_x_ray_nanot.md create mode 100644 content/paper/tang18_decen_vibrat_contr_voice_coil.md create mode 100644 content/paper/tjepkema12_sensor_fusion_activ_vibrat_isolat_precis_equip.md create mode 100644 content/paper/wang12_autom_marker_full_field_hard.md create mode 100644 content/paper/wang16_inves_activ_vibrat_isolat_stewar.md create mode 100644 content/paper/yang19_dynam_model_decoup_contr_flexib.md create mode 100644 content/paper/zhang11_six_dof.md create mode 100644 
content/websites/control_bootcamp.md create mode 100644 content/websites/data_driven_dynamical_systems_with_machine_learning.md create mode 100644 content/zettels/_index.md create mode 100644 content/zettels/active_damping.md create mode 100644 content/zettels/actuators.md create mode 100644 content/zettels/complementary_filters.md create mode 100644 content/zettels/cubic_architecture.md create mode 100644 content/zettels/electronics.md create mode 100644 content/zettels/flexible_joints.md create mode 100644 content/zettels/force_sensors.md create mode 100644 content/zettels/h_infinity_control.md create mode 100644 content/zettels/inertial_sensors.md create mode 100644 content/zettels/metrology.md create mode 100644 content/zettels/motion_control.md create mode 100644 content/zettels/multivariable_control.md create mode 100644 content/zettels/nano_active_stabilization_system.md create mode 100644 content/zettels/position_sensors.md create mode 100644 content/zettels/positioning_stations.md create mode 100644 content/zettels/precision_engineering.md create mode 100644 content/zettels/reference_books.md create mode 100644 content/zettels/sensor_fusion.md create mode 100644 content/zettels/stewart_platforms.md create mode 100644 content/zettels/system_identification.md create mode 100644 content/zettels/test.md create mode 100644 content/zettels/vibration_isolation.md diff --git a/content/book/_index.md b/content/book/_index.md new file mode 100644 index 0000000..7265672 --- /dev/null +++ b/content/book/_index.md @@ -0,0 +1,8 @@ ++++ +title = "Books" +author = ["Thomas Dehaeze"] +type = "book" +draft = false ++++ + +Here is the list of books I took note about. 
diff --git a/content/book/albertos04_multiv_contr_system.md b/content/book/albertos04_multiv_contr_system.md new file mode 100644 index 0000000..4dc3f09 --- /dev/null +++ b/content/book/albertos04_multiv_contr_system.md @@ -0,0 +1,20 @@ ++++ +title = "Multivariable control systems: an engineering approach" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Multivariable Control]({{< relref "multivariable_control" >}}) + +Reference +: (Albertos \& Antonio, 2004) + +Author(s) +: Albertos, P., & Antonio, S. + +Year +: 2004 + +# Bibliography +Albertos, P., & Antonio, S., *Multivariable control systems: an engineering approach* (2004), : Springer-Verlag. [↩](#51789ae56fcd2284b8426153d90595c8) diff --git a/content/book/du10_model_contr_vibrat_mechan_system.md b/content/book/du10_model_contr_vibrat_mechan_system.md new file mode 100644 index 0000000..51f0ecc --- /dev/null +++ b/content/book/du10_model_contr_vibrat_mechan_system.md @@ -0,0 +1,22 @@ ++++ +title = "Modeling and control of vibration in mechanical systems" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Vibration Isolation]({{< relref "vibration_isolation" >}}) + +Reference +: (Chunling Du \& Lihua Xie, 2010) + +Author(s) +: Du, C., & Xie, L. + +Year +: 2010 + +Read Chapter 1 and 3. + +# Bibliography +Du, C., & Xie, L., *Modeling and control of vibration in mechanical systems* (2010), : CRC Press. [↩](#1d38bd128d92142dd456ab4e9bb4eb84) diff --git a/content/book/du19_multi_actuat_system_contr.md b/content/book/du19_multi_actuat_system_contr.md new file mode 100644 index 0000000..4e6b151 --- /dev/null +++ b/content/book/du19_multi_actuat_system_contr.md @@ -0,0 +1,659 @@ ++++ +title = "Multi-stage actuation systems and control" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + + +Reference +: (Chunling Du \& Chee Khiang Pang, 2019) + +Author(s) +: Du, C., & Pang, C. K. 
+ +Year +: 2019 + + +## Mechanical Actuation Systems {#mechanical-actuation-systems} + + +### Introduction {#introduction} + +When high bandwidth, high position accuracy and long stroke are required simultaneously: dual-stage systems composed of a coarse (or primary) actuator and a fine actuator working together are used. + +Popular choices for coarse actuator are: + +- DC motor +- Voice coil motor (VCM) +- Permanent magnet stepper motor +- Permanent magnet linear synchronous motor + +As fine actuators, most of the time piezoelectric actuator are used. + +In order to overcome fine actuator stringent stroke limitation and increase control bandwidth, three-stage actuation systems are necessary in practical applications. + + +### Actuators {#actuators} + + +#### Primary Actuator {#primary-actuator} + +Without loss of generality, the VCM actuator is used as the primary actuator. +When current passes through the coil, a force is produced which accelerates the actuator radially. +The produced force is a function of the current \\(i\_c\\): +\\[ f\_m = k\_t i\_c \\] +where \\(k\_t\\) is a linearized nominal value called the torque constant. + +The resonance of the actuator is mainly due to the flexibility of the pivot bearing, arm, suspension. + +Then the bandwidth of the control loop is low and the resonances are not a limiting factor of the control design, the actuator model can be considered as follows: +\\[ P\_v(s) = \frac{k\_{vcm}}{s^2} \\] + +When the bandwidth is high, the actuator resonances have to be considered in the control design since the flexible resonance modes will reduce the system stability and affect the control performance. 
Then the actuator model becomes +\\[ P\_v(s) = \frac{k\_{vcm}}{s^2} P\_r(s) \\] +which includes the resonance model +\\[ P\_r(s) = \Pi\_{i=1}^{N} P\_{ri}(s) \\] +and the resonance \\(P\_{ri}(s)\\) can be represented as one of the following forms + +\begin{align\*} + P\_{ri}(s) &= \frac{\omega\_i^2}{s^2 + 2 \xi\_i \omega\_i s + \omega\_i^2} \\\\\\ + P\_{ri}(s) &= \frac{b\_{1i} \omega\_i s + b\_{0i} \omega\_i^2}{s^2 + 2 \xi\_i \omega\_i s + \omega\_i^2} \\\\\\ + P\_{ri}(s) &= \frac{b\_{2i} s^2 + b\_{1i} \omega\_i s + b\_{0i} \omega\_i^2}{s^2 + 2 \xi\_i \omega\_i s + \omega\_i^2} +\end{align\*} + + +#### Secondary Actuators {#secondary-actuators} + +We here consider two types of secondary actuators: the PZT milliactuator (figure [1](#org53c168b)) and the microactuator. + + + +{{< figure src="/ox-hugo/du19_pzt_actuator.png" caption="Figure 1: A PZT-actuator suspension" >}} + +There are three popular types of micro-actuators: electrostatic moving-slider microactuator, PZT slider-driven microactuator and thermal microactuator. +There characteristics are shown on table [1](#table--tab:microactuator). + + +
+ Table 1: + Performance comparison of microactuators +
+ +| | Elect. | PZT | Thermal | +|-------------|-----------------------------------------------|-----------------------------------------------|----------------------------| +| TF | \\(\frac{K}{s^2 + 2\xi\omega s + \omega^2}\\) | \\(\frac{K}{s^2 + 2\xi\omega s + \omega^2}\\) | \\(\frac{K}{\tau s + 1}\\) | +| \\(\tau\\) | \\(<\SI{0.1}{ms}\\) | \\(<\SI{0.05}{ms}\\) | \\(>\SI{0.1}{ms}\\) | +| \\(omega\\) | \\(1-\SI{2}{kHz}\\) | \\(20-\SI{25}{kHz}\\) | \\(>\SI{15}{kHz}\\) | + + +### Single-Stage Actuation Systems {#single-stage-actuation-systems} + +A typical closed-loop control system is shown on figure [2](#org5941a76), where \\(P\_v(s)\\) and \\(C(z)\\) represent the actuator system and its controller. + + + +{{< figure src="/ox-hugo/du19_single_stage_control.png" caption="Figure 2: Block diagram of a single-stage actuation system" >}} + + +### Dual-Stage Actuation Systems {#dual-stage-actuation-systems} + +Dual-stage actuation mechanism for the hard disk drives consists of a VCM actuator and a secondary actuator placed between the VCM and the sensor head. +The VCM is used as the primary stage to provide long track seeking but with poor accuracy and slow response time, while the secondary stage actuator is used to provide higher positioning accuracy and faster response but with a stroke limit. + + + +{{< figure src="/ox-hugo/du19_dual_stage_control.png" caption="Figure 3: Block diagram of dual-stage actuation system" >}} + + +### Three-Stage Actuation Systems {#three-stage-actuation-systems} + +Due to the limited allowed stroke of the microactuator, the control bandwidth has to be restricted and that limits the dual-stage disturbance rejection capability. + +A three-stage actuation system is therefore introduced to further increase the bandwidth. + +Typically, a VCM actuator is used as the primary actuator, PZT milliactuator as the second stage actuator and a third actuator more collocated is used. 
+ + +## High-Precision Positioning Control of Dual-Stage Actuation Systems {#high-precision-positioning-control-of-dual-stage-actuation-systems} + + +### Introduction {#introduction} + +The sensitivity function of the closed-loop system has provided a straightforward view of its disturbance rejection capability. +It is demanded that the sensitivity function magnitude in the low-frequency range be sufficiently low, while its hump in high-frequency range stays low enough. +In view of this, the controller design for dual-stage actuation systems adopts a weighting function to shape the sensitivity function. + + +### Control Schemes {#control-schemes} + +A popular control scheme for dual-stage actuation system is the **decoupled structure** as shown in figure [4](#org03d53e8). + +- \\(C\_v(z)\\) and \\(C\_p(z)\\) are the controllers respectively, for the primary VCM actuator \\(P\_v(s)\\) and the secondary actuator \\(P\_p(s)\\). +- \\(\hat{P}\_p(z)\\) is an approximation of \\(P\_p\\) to estimate \\(y\_p\\). 
+- \\(d\_1\\) and \\(d\_2\\) denote internal disturbances +- \\(n\\) is the measurement noise +- \\(d\_u\\) stands for external vibration + + + +{{< figure src="/ox-hugo/du19_decoupled_control.png" caption="Figure 4: Decoupled control structure for the dual-stage actuation system" >}} + +The open-loop transfer function from \\(pes\\) to \\(y\\) is +\\[ G(z) = P\_p(z) C\_p(z) + P\_v(z) C\_v(z) + P\_v(z) C\_v(z) \hat{P}\_p(z) C\_p(z) \\] +And the overall sensitivity function of the closed loop system from \\(r\\) to \\(pes\\) is +\\[ S(z) = \frac{1}{1 + G(z)} \\] +which is approximately +\\[ S(z) = \frac{1}{[1 + P\_p(z) C\_p(z)] [1 + P\_v(z)C\_v(z)]} \\] +since within a certain bandwidth +\\[ \hat{P}\_p(z) \approx P\_p(z) \\] + +The sensitivity functions of the VCM loop and the secondary actuator loop are + +\begin{equation} + S\_v(z) = \frac{1}{1 + P\_v(z) C\_v(z)}, \quad S\_p(z) = \frac{1}{1 + P\_p(z) C\_p(z)} +\end{equation} + +And we obtain that the dual-stage sensitivity function \\(S(z)\\) is the product of \\(S\_v(z)\\) and \\(S\_p(z)\\). +Thus, the dual-stage system control design can be decoupled into two independent controller designs. + +Another type of control scheme is the **parallel structure** as shown in figure [5](#org37116a9). 
+The open-loop transfer function from \\(pes\\) to \\(y\\) is +\\[ G(z) = P\_p(z) C\_p(z) + P\_v(z) C\_v(z) \\] + +The overall sensitivity function of the closed-loop system from \\(r\\) to \\(pes\\) is +\\[ S(z) = \frac{1}{1 + G(z)} = \frac{1}{1 + P\_p(z) C\_p(z) + P\_v(z) C\_v(z)} \\] + + + +{{< figure src="/ox-hugo/du19_parallel_control_structure.png" caption="Figure 5: Parallel control structure for the dual-stage actuator system" >}} + +Because of the limited displacement range of the secondary actuator, the control efforts for the two actuators should be distributed properly when designing respective controllers to meet the required performance, make the actuators not conflict with each other, as well as prevent the saturation of the secondary actuator. + + +### Controller Design Method in the Continuous-Time Domain {#controller-design-method-in-the-continuous-time-domain} + +\\(\mathcal{H}\_\infty\\) loop shaping method is used to design the controllers for the primary and secondary actuators. +The structure of the \\(\mathcal{H}\_\infty\\) loop shaping method is plotted in figure [6](#org299b914) where \\(W(s)\\) is a weighting function relevant to the designed control system performance such as the sensitivity function. + +For a plant model \\(P(s)\\), a controller \\(C(s)\\) is to be designed such that the closed-loop system is stable and + +\begin{equation} + \\|T\_{zw}\\|\_\infty < 1 +\end{equation} + +is satisfied, where \\(T\_{zw}\\) is the transfer function from \\(w\\) to \\(z\\): \\(T\_{zw} = S(s) W(s)\\). + + + +{{< figure src="/ox-hugo/du19_h_inf_diagram.png" caption="Figure 6: Block diagram for \\(\mathcal{H}\_\infty\\) loop shaping method to design the controller \\(C(s)\\) with the weighting function \\(W(s)\\)" >}} + +Equation [1](#org361ec91) means that \\(S(s)\\) can be shaped similarly to the inverse of the chosen weighting function \\(W(s)\\). 
+One form of \\(W(s)\\) is taken as + +\begin{equation} + W(s) = \frac{\frac{1}{M}s^2 + 2\xi\omega\frac{1}{\sqrt{M}}s + \omega^2}{s^2 + 2\omega\sqrt{\epsilon}s + \omega^2\epsilon} +\end{equation} + +where \\(\omega\\) is the desired bandwidth, \\(\epsilon\\) is used to determine the desired low frequency level of sensitivity magnitude and \\(\xi\\) is the damping ratio. + +The controller can then be synthesis using the linear matrix inequality (LMI) approach. + +The primary and secondary actuator control loops are designed separately for the dual-stage control systems. +But when designing their respective controllers, certain performances are required for the two actuators, so that control efforts for the two actuators are distributed properly and the actuators don't conflict with each other's control authority. +As seen in figure [7](#org60ad057), the VCM primary actuator open loop has a higher gain at low frequencies, and the secondary actuator open loop has a higher gain in the high-frequency range. + + + +{{< figure src="/ox-hugo/du19_dual_stage_loop_gain.png" caption="Figure 7: Frequency responses of \\(G\_v(s) = C\_v(s)P\_v(s)\\) (solid line) and \\(G\_p(s) = C\_p(s) P\_p(s)\\) (dotted line)" >}} + +The sensitivity functions are shown in figure [8](#org1d6afb9), where the hump of \\(S\_v\\) is arranged within the bandwidth of \\(S\_p\\) and the hump of \\(S\_p\\) is lowered as much as possible. +This needs to decrease the bandwidth of the primary actuator loop and increase the bandwidth of the secondary actuator loop. + + + +{{< figure src="/ox-hugo/du19_dual_stage_sensitivity.png" caption="Figure 8: Frequency response of \\(S\_v(s)\\) and \\(S\_p(s)\\)" >}} + +A basic requirement of the dual-stage actuation control system is to make the individual primary and secondary loops stable. 
It is also required that the primary actuator path has a higher gain than the secondary actuator path at low frequency range and the secondary actuator path has a higher gain than the primary actuator path in high-frequency range. +These can be achieved by choosing appropriate weighting functions for the controller design. + + +### Conclusion {#conclusion} + +The controller design has been discussed for high-precision positioning control of the dual-stage actuation systems. +The \\(\mathcal{H}\_\infty\\) loop shaping method has been applied and the design method has been presented. +With the weighting functions, the desired sensitivity function can be achieved.
+ +The control scheme is based on the decoupled master-slave dual-stage control and the third stage microactuator is added in parallel with the dual-stage control system. +The parallel format is advantageous to the overall control bandwidth enhancement, especially for the microactuator having limited stroke which restricts the bandwidth of its own loop. +The reason why the decoupled control structure is adopted here is that its overall sensitivity function is the product of those of the two individual loops, and the VCM and the PTZ controllers can be designed separately. + + + +{{< figure src="/ox-hugo/du19_three_stage_control.png" caption="Figure 9: Control system for the three-stage actuation system" >}} + +The open-loop transfer function of the three-stage actuation system is derived as + +\begin{equation} + G(z) = G\_v(z) + G\_p(z) + G\_v(z) G\_p(z) + G\_m(z) +\end{equation} + +with + + \begin{align\*} + G\_v(z) &= P\_v(z) C\_v(z) \\\\\\ + G\_p(z) &= P\_p(z) C\_p(z) \\\\\\ + G\_m(z) &= P\_m(z) C\_m(z) +\end{align\*} + +The overall sensitivity function is given by + +\begin{equation} + S(z) = \frac{1}{1 + G(z)} +\end{equation} + +The VCM actuator \\(P\_v(s)\\) works in a low bandwidth below \\(\SI{1}{kHz}\\). +The PZT actuated milliactuator \\(P\_p(s)\\) works under a reasonably high bandwidth up to \\(\SI{3}{kHz}\\). +The third-stage actuator \\(P\_m(s)\\) is used to further push the bandwidth as high as possible. + +The control performances of both the VCM and the PZT actuators are limited by their dominant resonance modes. +The open-loop frequency responses of the three stages are shown on figure [10](#orgefe88f9). + + + +{{< figure src="/ox-hugo/du19_open_loop_three_stage.png" caption="Figure 10: Frequency response of the open-loop transfer function" >}} + +The obtained sensitivity function is shown on figure [11](#orgd0c25f8). 
+ + + +{{< figure src="/ox-hugo/du19_sensitivity_three_stage.png" caption="Figure 11: Sensitivity function of the VCM single stage, the dual-stage and the three-stage loops" >}} + + +### Performance Evaluation {#performance-evaluation} + +External vibration from the system working environment is much higher than the internal disturbance, especially for ultra-high precision positioning systems. +In the presence of external vibration, the actuators control effort is dominantly determined by the external vibration. +But because the actuator input is constrained, the external vibration level has to be limited. +Otherwise, saturation will occur in the control loop and the control system performance will be degraded. + +Therefore, the stroke specification of the actuators, especially milliactuator and microactuators, is very important for achievable control performance. +Higher stroke actuators have stronger abilities to make sure that the control performances are not degraded in the presence of external vibrations. + +For the three-stage control architecture as shown on figure [9](#org0e50764), the position error is +\\[ e = -S(P\_v d\_1 + d\_2 + d\_e) + S n \\] +The control signals and positions of the actuators are given by + +\begin{align\*} + u\_p &= C\_p e,\ y\_p = P\_p C\_p e \\\\\\ + u\_m &= C\_m e,\ y\_m = P\_m C\_m e \\\\\\ + u\_v &= C\_v ( 1 + \hat{P}\_pC\_p ) e,\ y\_v = P\_v ( u\_v + d\_1 ) +\end{align\*} + +The controller design for the microactuators with input constraints must take into account both external vibration requirements and actuators' stroke, based on which an appropriate bandwidth should be decided when designing the control system. +Higher bandwidth/higher level of disturbance generally means high stroke needed. + + +### Different Configurations of the Control System {#different-configurations-of-the-control-system} + +A decoupled control structure can be used for the three-stage actuation system (see figure [12](#org5bb499d)). 
+ +The overall sensitivity function is +\\[ S(z) = \approx S\_v(z) S\_p(z) S\_m(z) \\] +with \\(S\_v(z)\\) and \\(S\_p(z)\\) are defined in equation [1](#orga34ddfe) and +\\[ S\_m(z) = \frac{1}{1 + P\_m(z) C\_m(z)} \\] + +Denote the dual-stage open-loop transfer function as \\(G\_d\\) +\\[ G\_d(z) = G\_v(z) + G\_p(z) + G\_v(z) G\_p(z) \\] + +The open-loop transfer function of the overall system is +\\[ G(z) = G\_d(z) + G\_m(z) + G\_d(z) G\_m(z) \\] + + + +{{< figure src="/ox-hugo/du19_three_stage_decoupled.png" caption="Figure 12: Decoupled control structure for the three-stage actuation system" >}} + +The control signals and the positions of the three actuators are + +\begin{align\*} + u\_p &= C\_p(1 + \hat{P}\_m C\_m) e, \ y\_p = P\_p u\_p \\\\\\ + u\_m &= C\_m e, \ y\_m = P\_m M\_m e \\\\\\ + u\_v &= C\_v(1 + \hat{P}\_p C\_p) (1 + \hat{P}\_m C\_m) e, \ y\_v = P\_v u\_v +\end{align\*} + +The decoupled configuration makes the low frequency gain much higher, and consequently there is much better rejection capability at low frequency compared to the parallel architecture (see figure [13](#org0a46272)). + + + +{{< figure src="/ox-hugo/du19_three_stage_decoupled_loop_gain.png" caption="Figure 13: Frequency responses of the open-loop transfer functions for the three-stages parallel and decoupled structure" >}} + + +### Conclusion {#conclusion} + +The relationship among the external vibration, the microactuator stroke, and the achievable control bandwidth has been discussed for being considered in the controller design. +The discussion suggests that in addition to the traditional wisdom of just increasing the resonant frequency, adding more stroke to the microactuator will give more freedom to the loop shaping for the control system design. 
+ + +## Dual-Stage System Control Considering Secondary Actuator Stroke Limitation {#dual-stage-system-control-considering-secondary-actuator-stroke-limitation} + + +### Introduction {#introduction} + + +### More Freedom Loop Shaping for Microactuator Controller Design {#more-freedom-loop-shaping-for-microactuator-controller-design} + + +### Dual-Stage System Control Design for 5 kHz Bandwidth {#dual-stage-system-control-design-for-5-khz-bandwidth} + + +### Evaluation with the Consideration of External Vibration and Microactuator Stroke {#evaluation-with-the-consideration-of-external-vibration-and-microactuator-stroke} + + +### Conclusion {#conclusion} + + +## Saturation Control for Microactuators in Dual-Stage Actuation Systems {#saturation-control-for-microactuators-in-dual-stage-actuation-systems} + + +### Introduction {#introduction} + + +### Modeling and Feedback Control {#modeling-and-feedback-control} + + +### Anti-Windup Compensation Design {#anti-windup-compensation-design} + + +### Simulation and Experimental Results {#simulation-and-experimental-results} + + +### Conclusion {#conclusion} + + +## Time Delay and Sampling Rate Effect on Control Performance of Dual-Stage Actuation Systems {#time-delay-and-sampling-rate-effect-on-control-performance-of-dual-stage-actuation-systems} + + +### Introduction {#introduction} + + +### Modeling of Time Delay {#modeling-of-time-delay} + + +### Dual-Stage Actuation System Modeling with Time Delay for Controller Design {#dual-stage-actuation-system-modeling-with-time-delay-for-controller-design} + + +### Controller Design with Time Delay for the Dual-Stage Actuation Systems {#controller-design-with-time-delay-for-the-dual-stage-actuation-systems} + + +### Time Delay Effect on Dual-Stage System Control Performance {#time-delay-effect-on-dual-stage-system-control-performance} + + +### Sampling Rate Effect on Dual-Stage System Control Performance {#sampling-rate-effect-on-dual-stage-system-control-performance} + + +### 
Conclusion {#conclusion} + + +## PZT Hysteresis Modeling and Compensation {#pzt-hysteresis-modeling-and-compensation} + + +### Introduction {#introduction} + + +### Modeling of Hysteresis {#modeling-of-hysteresis} + + +#### PI Model {#pi-model} + + +#### GPI Model {#gpi-model} + + +#### Inverse GPI Model {#inverse-gpi-model} + + +### Application of GPI Model to a PZT-Actuated Structure {#application-of-gpi-model-to-a-pzt-actuated-structure} + + +#### Modeling of the Hysteresis in the PZT-Actuated Structure {#modeling-of-the-hysteresis-in-the-pzt-actuated-structure} + + +#### Hysteresis Compensator Design {#hysteresis-compensator-design} + + +#### Experimental Verification {#experimental-verification} + + +### Conclusion {#conclusion} + + +## Seeking Control of Dual-Stage Actuation Systems with Trajectory Optimization {#seeking-control-of-dual-stage-actuation-systems-with-trajectory-optimization} + + +### Introduction {#introduction} + + +### Current Profile of VCM Primary Actuator {#current-profile-of-vcm-primary-actuator} + + +#### PTOS Method {#ptos-method} + + +#### A General Form of VCM Current Profiles {#a-general-form-of-vcm-current-profiles} + + +### Control System Structure for the Dual-Stage Actuation System {#control-system-structure-for-the-dual-stage-actuation-system} + + +### Design of VCM Current Profile a[sub(v)] and Dual-Stage Reference Trajectory r[sub(d)] {#design-of-vcm-current-profile-a-sub--v--and-dual-stage-reference-trajectory-r-sub--d} + + +### Seeking within PZT Milliactuator Stroke {#seeking-within-pzt-milliactuator-stroke} + + +### Seeking over PZT Milliactuator Stroke {#seeking-over-pzt-milliactuator-stroke} + + +### Conclusion {#conclusion} + + +## High-Frequency Vibration Control Using PZT Active Damping {#high-frequency-vibration-control-using-pzt-active-damping} + + +### Introduction {#introduction} + + +### Singular Perturbation Method-Based Controller Design {#singular-perturbation-method-based-controller-design} + + +#### Singular 
Perturbation Control Topology {#singular-perturbation-control-topology} + + +#### Identification of Fast Dynamics Using PZT as a Sensor {#identification-of-fast-dynamics-using-pzt-as-a-sensor} + + +#### Design of Controllers {#design-of-controllers} + + +##### Fast Subsystem Estimator G[sub(v)][sup(\*)] {#fast-subsystem-estimator-g-sub--v--sup} + + +##### Fast Controller C[sub(v)] {#fast-controller-c-sub--v} + + +##### Slow Controller C[sub(v)] {#slow-controller-c-sub--v} + + +#### Simulation and Experimental Results {#simulation-and-experimental-results} + + +##### Frequency Responses {#frequency-responses} + + +##### Time Responses {#time-responses} + + +### H[sub(2)] Controller Design {#h-sub--2--controller-design} + + +### Design of C[sub(d)](z) with H[sub(2)] Method and Notch Filters {#design-of-c-sub--d----z--with-h-sub--2--method-and-notch-filters} + + +### Design of Mixed H[sub(2)]/H[sub(∞)] Controller C[sub(d)](z) {#design-of-mixed-h-sub--2--h-sub-----controller-c-sub--d----z} + + +### Application Results {#application-results} + + +#### System Modeling {#system-modeling} + + +#### H[sub(2)] Active Damping Control {#h-sub--2--active-damping-control} + + +#### Mixed H[sub(2)]/H[sub(∞)] Active Damping Control {#mixed-h-sub--2--h-sub-----active-damping-control} + + +#### Experimental Results {#experimental-results} + + +### Conclusion {#conclusion} + + +## Self-Sensing Actuation of Dual-Stage Systems {#self-sensing-actuation-of-dual-stage-systems} + + +### Introduction {#introduction} + + +### Estimation of PZT Secondary Actuator’s Displacement y[sub(p)][sup(\*)] {#estimation-of-pzt-secondary-actuator-s-displacement-y-sub--p--sup} + + +#### Self-Sensing Actuation and Bridge Circuit {#self-sensing-actuation-and-bridge-circuit} + + +#### PZT Displacement Estimation Circuit H[sub(B)] {#pzt-displacement-estimation-circuit-h-sub--b} + + +### Design of Controllers {#design-of-controllers} + + +#### VCM Controller and Controller C[sub(D)] 
{#vcm-controller-and-controller-c-sub--d} + + +#### PZT Controller {#pzt-controller} + + +### Performance Evaluation {#performance-evaluation} + + +#### Effectiveness of C[sub(D)] {#effectiveness-of-c-sub--d} + + +#### Position Errors {#position-errors} + + +### Conclusion {#conclusion} + + +## Modeling and Control of a MEMS Micro X–Y Stage Media Platform {#modeling-and-control-of-a-mems-micro-x-y-stage-media-platform} + + +### Introduction {#introduction} + + +### MEMS Micro X–Y Stage {#mems-micro-x-y-stage} + + +#### Design and Simulation of Micro X–Y Stage {#design-and-simulation-of-micro-x-y-stage} + + +##### Static {#static} + + +##### Dynamic {#dynamic} + + +#### Modeling of Micro X–Y Stage {#modeling-of-micro-x-y-stage} + + +#### Fabrication of the MEMS Micro X–Y Stage {#fabrication-of-the-mems-micro-x-y-stage} + + +### Capacitive Self-Sensing Actuation {#capacitive-self-sensing-actuation} + + +#### Design of CSSA Bridge Circuit {#design-of-cssa-bridge-circuit} + + +#### Experimental Verification {#experimental-verification} + + +### Robust Decoupling Controller Design {#robust-decoupling-controller-design} + + +#### Choice of Pre-Shaping Filters {#choice-of-pre-shaping-filters} + + +#### Controller Synthesis {#controller-synthesis} + + +#### Frequency Responses {#frequency-responses} + + +#### Time Responses {#time-responses} + + +#### Robustness Analysis {#robustness-analysis} + + +### Conclusion {#conclusion} + + +## Conclusions {#conclusions} + +Many secondary actuators have been developed in addition to primary actuators in the field of mechanical actuation systems. +The aim is to provide high performance such as high precision and fast response. +Several types of secondary actuators have been introduced such as PZT milliactuator, electrostatic microactuator, PZT microactuator, and thermal microactuator. +Comparison of these secondary actuators has been made, and these secondary actuators have made dual and multi-stage actuation mechanisms possible. 
+ +Three-stage actuation systems have been proposed for the demand of wider bandwidth, to overcome the limitation by stroke constraint and saturation of secondary actuators. +After the characteristics of the three-stage systems have been developed and the models have been identified, the control strategy and algorithm have been developed to deal with vibrations and meet different requirements. +Particularly, for the three-stage actuation systems, the presented control strategies make it easy to further push the bandwidth and meet the performance requirement. +The control of the thermal microactuator based dual-stage system has been discussed in detail, including linearization and controller design method. + +The developed advanced algorithms applied in the multi-stage systems include \\(\mathcal{H}\_\infty\\) loop shaping, anti-windup compensation, \\(\mathcal{H}\_2\\) control method, +and mixed \\(\mathcal{H}\_2/\mathcal{H}\_\infty\\) control method. +Typical problems of the milli and micro-actuators as the secondary actuators have been considered and appropriate solutions have been presented such as saturation compensation, hysteresis modeling and compensation, stroke limitation, and PZT self-sensing scheme. +Time delay and sampling rate effect on the control performance have been analyzed to help select appropriate sampling rate and design suitable controllers. + +Specific usage of PZT elements has been produced for system performance improvement. +Using PZT elements as a sensor to deal with high-frequency vibration beyond the bandwidth has been proposed and systematic controller design methods have been developed. +As a more advanced concept, PZT elements being used as actuator and sensor simultaneously has also been addressed in this book with detailed scheme and controller design methodology for effective utilization. + +# Bibliography +Du, C., & Pang, C. K., *Multi-stage actuation systems and control* (2019), Boca Raton, FL: CRC Press. 
[↩](#28e6aa60b8446943c921ff0d6578ac85) diff --git a/content/book/ewins00_modal.md b/content/book/ewins00_modal.md new file mode 100644 index 0000000..fbcf3fd --- /dev/null +++ b/content/book/ewins00_modal.md @@ -0,0 +1,4560 @@ ++++ +title = "Modal testing: theory, practice and application" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [System Identification]({{< relref "system_identification" >}}), [Reference Books]({{< relref "reference_books" >}}) + +Reference +: (Ewins, 2000) + +Author(s) +: Ewins, D. + +Year +: 2000 + + +## Overview {#overview} + + +### Introduction to Modal Testing {#introduction-to-modal-testing} + +The **major objectives** of modal testing are: + +- Determining the nature and extent of vibration response levels in operation +- Verifying theoretical models and predictions of the vibrations +- Measurement of the essential materials properties under dynamic loading, such as damping capacity, friction and fatigue endurance + +For many applications, vibrations is directly related to performance and it is important that the vibration levels are anticipated and brought under satisfactory control. + +The two major vibration measurement objectives corresponds to two types of test: + +1. **Free vibration**: responses are measured during operation of the machine +2. **Forced vibrations**: the structure is vibrated with a known excitation, often out of its normal service environment. This type of testing is generally made under more closely-controlled conditions than the first one, and yields to more accurate information + +Modal Testing means "the processes involved in testing components or structure with the objective of obtaining a mathematical description of their dynamic of vibration behavior". +The form of "mathematical description" can vary from one application to the other: it can be an estimate of natural frequency and damping factor in one case and a full mass-spring-dashpot model for the next. 
+ + +### Applications of modal testing {#applications-of-modal-testing} + +We must remember that no single test or analysis procedure is best for all cases and so it is very important that a clear objective is defined before any industrial test is undertaken so that the optimum methods may be used. + +The different objectives can be: + +1. Measurement of a structure's vibration properties in order to **compare these with a theoretical model** (finite element model for instance). + This is usually used to **validate a model**. What is required for the test are: + + - accurate estimates of natural frequencies + - descriptions of the mode shapes + + At this stage, accurate mode shape data are not essential. It is generally not possible to "predict" the damping in each mode of vibration from a theoretical model. +2. **Adjust or correct the theoretical model** in order to bring its modal properties closer into line with the measured results. + A **correlation** technique can be used: the two sets of data are combined, quantitatively, in order to identify specifically the causes of the discrepancies between predicted and measured properties. This however, requires precise description of the mode shapes (the eigenvectors) from the modal analysis. +3. **Sub-structuring process**: use modal testing in order to produce a mathematical model of a component which may be incorporated into a structural assembly. + Here, as it is a fully quantitative model that is sought, accurate data are required for natural frequencies, modal damping factors and mode shapes. + Also, all modes must be included simultaneously as **out-of-range modes will influence the structure's behavior** in a given frequency range of interest for the complete assembly. This application is altogether more demanding than the previous ones. +4. **Predicting the effects of modifications** to the original structure, as tested. 
+ For this application and the sub-structuring process, one needs information about **rotational degrees-of-freedom**, i.e. moments and rotational displacements. + These are generally ignored in experimental-based studies as they are much more difficult to measure. +5. **Force Determination**. There are a number of situations where **knowledge of the dynamic forces causing vibration is required** but where direct measurement of these forces is not practical. + For these cases, one solution is offered by a process whereby measurements of the response caused by the forces are combined with a mathematical description of the transfer functions of the structure in order to deduce the forces. + This process can be very sensitive to the accuracy of the model used, and it is often essential that the model itself be derived from measurements. + +Usually, the normal procedure for modal testing is: + +1. **Measure** +2. **Analyze** the measured data +3. **Derive a mathematical model** of the structure + +However, there are some cases where this is not the optimum procedure. +The last step is usually taken in order to reduce a vast quantity of actual measurements to a small and efficient data set usually referred to as the "modal model". +This reduction process has an additional benefit of eliminating small inconsistencies which will inevitably occur in measured data. + + +### Philosophy of Modal Testing {#philosophy-of-modal-testing} + +One of the major requirements to apply modal testing is a thorough integration with a high level of understanding of three components: + +1. **Theoretical basis** of vibration +2. **Accurate measurement** of vibration +3. Realistic and detailed **data analysis** + +For instance, there are many features of a frequency response function that can be assessed rapidly by understanding some **theoretical basis**. +This could prevent the wasted effort of analyzing incorrect measurements.
+ +Then, for the **practical** side, there are many choices of test methods: harmonic, random, transient excitation. +The experimenter should be aware of the limitations and implications of the various techniques used in the measurement phases. + +Next, we consider the **analysis** stage where the measured data (Frequency Response Functions or FRF) are subjected to a range of curve-fitting procedures in an attempt to find the mathematical model which provides the closest description of the actually-observed behavior. +There are many approaches, and one should be aware of the alternatives in order to choose the optimal one. + +Often, an analysis may be conducted on each measured curve individually. +Then, there is a further step in the process: **modeling**. +This is the final stage where all the measured and processed data are combined to yield the most compact and efficient mathematical model of the test structure. + +However, the averaging process is a valid and valuable technique only provided that the data contain **random** vibrations. +Data with systematic trends, such as those caused by poor testing practices or non-linearities, should not be averaged in the same way. + + +### Summary of Theory {#summary-of-theory} + +It is very important that a clear distinction is made between **free** vibrations and **forced** vibration analysis. + +For the SDOF system, a free vibration analysis yields its natural frequency and damping factor, whereas a forced response analysis (assuming a harmonic excitation) leads to the definition of the frequency response function. +These two types of results are referred to as **modal properties** and **frequency response characteristics**. + +Next, we consider the more general class of systems which have more than one degree-of-freedom. +For these, it is customary that the spatial properties (the values of the mass, stiffness, and damper elements) be expressed as matrices.
+Those used are the mass matrix \\(M\\), the stiffness matrix \\(K\\), the viscous damping matrix \\(C\\) and the structural or hysteretic damping matrix \\(D\\). + +There are three phases in the vibration analysis of such systems: + +1. **Setting up the governing equations of motion**, which means determining the elements of the above matrices +2. **Free vibration analysis** using the equations of motion. + This analysis produces first a set of \\(N\\) natural frequencies and damping factors, and secondly a matching set of \\(N\\) "mode shape" vectors, each one of these being associated with a specific natural frequency and damping factor. + The complete free vibration solution is conveniently contained in two matrices \\(\lambda^2\\) and \\(\phi\\), which are again referred to as "modal properties", or sometimes, as the **eigenvalue** and **eigenvector matrices**. + One element from the diagonal eigenvalue matrix \\(\lambda\_r^2\\) contains **both the natural frequency and the damping factor** for the \\(r^{\text{th}}\\) normal mode of vibration of the system while the corresponding column \\(\phi\_r\\) describes the **shape of that same mode of vibration** +3. **Forced response analysis**, and in particular harmonic excitation. + By solving the equations of motion when harmonic forcing is applied, we are able to describe the complete solution by a single matrix, known as the **frequency response matrix** \\(H(\omega)\\). + Thus, element \\(H\_{jk}(\omega)\\) represents the harmonic response, \\(X\_j\\) in one of the DOF \\(j\\) caused by a single harmonic force \\(F\_k\\) applied in the DOF \\(k\\). + The particular relevance of these specific response characteristics is the fact that they are the quantities which we are most likely to be able to **measure in practice**.
+ However, the same expressions can be **drastically simplified if we use the modal properties** instead of the spatial properties and it is possible to write an expressing for any FRF, \\(H\_{jk}(\omega)\\), which has the general form + + \begin{equation\*} + H\_{jk}(\omega) = \frac{X\_j}{F\_k} = \sum\_{r=1}^{N}\frac{{}\_rA\_{jk}}{\lambda\_r^2 - \omega^2} \label{eq:frf\_modal} + \end{equation\*} + + where \\(\lambda\_r^2\\) is the eigenvalue of the \\(r^{\text{th}}\\) mode, \\({}\_rA\_{jk}\\) (the modal constant) is constructed from \\(\phi\_{jk}\\) which is the \\(j^{\text{th}}\\) element of the \\(r^{\text{th}}\\) eigenvector \\(\phi\_r\\) and \\(N\\) is the number of degrees-of-freedom (or modes). + This expression forms the foundation of modal analysis: it shows a **direct connection between the modal properties of a system and its response characteristics**. + +Thus, we find that by making a thorough study of the theory of structural vibration, we are able to "predict" what we might expect to find if we make FRF measurements on actual hardware. +Indeed, we shall see later how these predictions can be quite detailed, to the point where it is possible to comment on the likely quality of measured data. + + +### Summary of measurement methods {#summary-of-measurement-methods} + +The main measurement technique studied are those which will permit to make **direct measurements of the various FRF** properties of the test structure. + +The type of test best suited to FRF measurement is shown in figure [fig:modal_analysis_schematic](#fig:modal_analysis_schematic). + + + +{{< figure src="/ox-hugo/ewins00_modal_analysis_schematic.png" caption="Figure 1: Basic components of FRF measurement system" >}} + +Essentially, there are three aspect of the measurement process which demand particular attention in order to ensure the acquisition of the high-quality data which are required for the next stage (data analysis). These are: + +1. 
The mechanical aspect of **supporting** and **correctly exciting the structure** +2. The **correct transduction** of the quantities to be measured (force input and motion response) +3. The **signal processing** which is appropriate to the type of test used + + +##### Mechanical Aspect {#mechanical-aspect} + +We here encounter questions as how the testpiece should be suspended, or supported and how it should be excited. +Usually, one of three options is chosen for the support: + +- **Free or unrestrained**: usually means suspended on very soft springs, this has the advantage that free boundaries are easy to simulate +- **Grounded**: requires rigid clamping at certain points +- **In situ**: the testpiece is connected to some structure representing a non-rigid attachment + +The mechanics of the excitation are achieve either by connection a **vibration generator** or shaker, or by using some form of **transient input**, such as a hammer blow or sudden release from a deformed position. +Both approaches have advantages and disadvantages and it can be very important to choose the best one in each case. + + +##### Transducers {#transducers} + +Transducers are very important elements in the system as it is essential that accurate measurements be made of both the input to the structure and of its response. +Nowadays, **piezoelectric transducers** are widely used to detect both force and acceleration and the major problems associated with them are to ensure that they **interfere as little as possible with the test structure** and that their **performance is adequate for the ranges of frequency and amplitude** of the test. + + +##### Signal Processing {#signal-processing} + +The FRF parameters to be measured can be obtained directly by applying an harmonic excitation and then measuring the resulting harmonic response. +This type of test is often referred to as **sinewave testing** and it requires the attachment of a shaker to the structure. 
+The frequency range is covered by sweeping the frequency continuously or by step. + +Alternative excitation procedures are now widely used. +Transient (including burst signals), periodic, pseudo-random or **random excitation signals** often replace the sine wave approach and are made practical by the existence of complex signal processing analysers which are capable of resolving the frequency content of both input and response signals using Fourier analysis. + +In modal testing applications of vibrations measurements, **accuracy of the measured data is of paramount importance**. +This is so because these data are generally to be submitted to a range of analysis procedures, in order to extract the results. +Some of these analysis processes are themselves quite complex and can seldom be regarded as insensitive to the accuracy of the input data. + + +### Summary of Modal Analysis Processes {#summary-of-modal-analysis-processes} + +The third skill required for modal testing is concerned with the **analysis of the measured FRF data**. +This is quite separate from the signals processing which may be necessary to convert raw measurements into frequency response. + +It is a procedure whereby the measured mobilities are analyzed in such a way as to find a theoretical model which most closely resembles the behavior of the actual testpiece. +This process itself falls into two stages: + +1. **Identify the appropriate type of model** +2. **Determine the appropriate parameters** of the chosen model + +Most of the effort goes into this second stage, which is widely referred to as "modal parameter extraction", or simply as "modal analysis". + +We have seen that we can predict the form of the FRF plots for a multi degree-of-freedom system, and that these are directly related to the modal properties of that system.
+The great majority of the modal analysis effort involves **curve-fitting** an expression such as equation [eq:frf_modal](#eq:frf_modal) to the measured FRF and thereby finding the appropriate modal parameters. + +A completely general curve-fitting approach is possible but generally inefficient. +Mathematically, we can take an equation of the form +\\[ H(\omega) = \sum\_{r=1}^N \frac{A\_r}{\lambda\_r^2 - \omega^2} \\] +and curve fit a set of measured values \\(H\_m(\omega\_1), H\_m(\omega\_2), \dots\\) to this expression so that we obtain estimates for the coefficients \\(A\_1, A\_2, \dots, \lambda\_1^2, \lambda\_2^2, \dots\\). +These coefficients are closely related to the modal properties of the system. +However, although such approaches are made, they are **inefficient** and neither exploit the particular properties of resonant systems nor take due account of the unequal quality of the various measured points in the data set, both of which can have a significant influence on the overall analysis process. +Thus there is **no single modal analysis method**, but rather a selection, each being the most appropriate in differing conditions. + +One of the most widespread and useful approaches is known as the **single-degree-of-freedom curve-fit**, or often as the **circle fit** procedure. +This method uses the fact that **at frequencies close to a natural frequency**, the FRF can often be **approximated to that of a single degree-of-freedom system** plus a constant offset term (which approximately accounts for the existence of other modes). +This assumption allows us to use the circular nature of a modulus/phase polar plot of the frequency response function of a SDOF system (see figure [fig:sdof_modulus_phase](#fig:sdof_modulus_phase)). +This process can be **repeated** for each resonance individually until the whole curve has been analyzed. +At this stage, a theoretical regeneration of the FRF is possible using the set of coefficients extracted.
+ + + +{{< figure src="/ox-hugo/ewins00_sdof_modulus_phase.png" caption="Figure 2: Curve fit to resonant FRF data" >}} + +These simple methods can be used for many of the cases encountered in practice but they become inadequate and **inaccurate when the structure has mode which are close**. +Under these conditions, it becomes necessary to use a more complex process which accepts the simultaneous influence of more than one mode. +These methods are referred to as **MDOF curve-fits** and are naturally more complicated and require more computation effort but, provided the data are accurate, they have the capability of producing more accurate estimates for the modal properties. + +Some of more detailed considerations include: compensating for slightly non-linear behavior, simultaneously analyzing more than one FRF and curve-fitting to actual time histories. + + +### Review of Test Procedures and Levels {#review-of-test-procedures-and-levels} + +The overall objective of the test is to determine a set of modal properties for a structure. +These consist of natural frequencies, damping factors and mode shapes. +The procedure consists of **three steps**: + +1. **Measure** an appropriate set of mobilities, or FRF +2. **Analyze** these using appropriate curve-fitting procedures +3. **Combine** the results of the curve-fits to construct the required model + +Using our knowledge of the theoretical relationship between FRF functions and modal properties, it is possible to show that an "appropriate" set of FRFs to measure consists in most cases of **just one row or one column in the FRF matrix** \\(H(\omega)\\). +In practice this either means **exciting the structure at one point and measuring responses at all points** or **measuring the response at one point while the excitation is applied separately at each point in turn**. +This last option is most conveniently achieve using a hammer. 
+ +Even though the same overall procedure is always followed, there will be a **different level of detail** required for each different application. + + +## Theoretical Basis {#theoretical-basis} + + +### Introduction {#introduction} + +Theoretical foundations of modal testing are of paramount importance to its successful implementation. + +The three phases through a typical theoretical vibration analysis progresses are shown on figure [fig:vibration_analysis_procedure](#fig:vibration_analysis_procedure). +Generally, we start with a description of the structure's physical characteristics (mass, stiffness and damping properties), this is referred to as the **Spatial model**. + + + +{{< figure src="/ox-hugo/ewins00_vibration_analysis_procedure.png" caption="Figure 3: Theoretical route to vibration analysis" >}} + +Then, it is customary to perform a theoretical modal analysis of the spatial model which leads to a description of the structure's behavior as a set of vibration modes: the **modal model**. + +
+
+ +This model is defined by a set of **natural frequencies** with corresponding **modal damping factors** and **vibration mode shapes**. +This solution describes the various ways in which the structure is capable of vibrating **naturally** (without any external forces or excitations), and so these are called the **natural** or **normal** modes of the structure. + +
+ +The third stage is generally that in which we have the greatest interest; namely, the analysis of **exactly how the structure will respond under given excitation conditions**. +It is convenient to present an analysis of the structure's response to a "standard" excitation (from which the solution for **any particular case** can be constructed) and to describe this as the **response model**. +The **standard excitation** chosen is a **unit-amplitude sinusoidal force applied to each point on the structure individually**, and at every frequency within a specified range. +Thus our response model will consist of a set of **frequency response functions**. + +
+
+ +As indicated in figure [fig:vibration_analysis_procedure](#fig:vibration_analysis_procedure), it is also possible to do an analysis in the reverse directly: from a description of the response properties (FRFs), we can deduce modal properties and the spatial properties: this is the **experimental route** to vibration analysis. + +
+ + +### Single Degree of Freedom System Theory {#single-degree-of-freedom-system-theory} + +Although very few practical structures could realistically be modeled by a SDOF system, the properties of such a system are very important because those for a more complex MDOF system can always be represented as a **linear superposition** of a number of SDOF characteristics. + +
+
+ +Three classes of system model will be described: + +- Undamped +- Viscously-damped +- Hysteretically (or structurally) damped + +
+ +The basic model for the SDOF system is shown in figure [fig:sdof_model](#fig:sdof_model) where \\(f(t)\\) and \\(x(t)\\) are general time-varying force and displacement response quantities. +The spatial model consists of a **mass** \\(m\\), a **spring** \\(k\\) and (when damped) either a **viscous dashpot** \\(c\\) or **hysteretic damper** \\(d\\). + + + +{{< figure src="/ox-hugo/ewins00_sdof_model.png" caption="Figure 4: Single degree-of-freedom system" >}} + + +#### Undamped Systems {#undamped-systems} + +The governing equation of motion is + +\begin{equation} + m \ddot{x} + k x = 0 +\end{equation} + +The trial solution \\(x(t) = X e^{i\omega t}\\) leads to +\\[ k - m \omega^2 = 0 \\] + +Hence the modal model consists of a single solution (mode of vibration) with a **natural frequency** +\\[ \omega\_0 = \sqrt{k/m} \\] + +Turning next to a **frequency response analysis**, we consider an excitation of the form +\\[ f(t) = F e^{i\omega t} \\] +and assume a solution of the form +\\[ x(t) = X e^{i\omega t} \\] +where \\(F\\) and \\(X\\) are complex. +Now the equation of motion is + +\begin{equation} + (k - m \omega^2) X e^{i\omega t} = F e^{i\omega t} +\end{equation} + +from which we extract the required response model in the form of a **frequency response function**. + +
+
 + +\begin{equation} + \alpha(\omega) = \frac{X}{F} = \frac{1}{k - \omega^2 m} \label{eq:receptance} +\end{equation} + +
 + +This particular form of FRF, where the response parameter is **displacement** (as opposed to velocity or acceleration) is called a **receptance**. + + +#### Viscous Damping {#viscous-damping} + +If we add a **viscous dashpot** \\(c\\), the equation of motion becomes + +\begin{equation} + m \ddot{x} + c \dot{x} + k x = 0 +\end{equation} + +and we must now use a more general trial solution +\\[ x(t) = X e^{st} \\] +where \\(s\\) is **complex**. +We obtain the condition +\\[ ms^2 + cs + k = 0 \\] +which leads to + +\begin{align} + s\_{1, 2} &= -\frac{c}{2 m} \pm \frac{\sqrt{c^2 - 4 k m}}{2 m} \\\\\\ + &= - \bar{\omega}\_0 \xi \pm i \bar{\omega}\_0 \sqrt{1 - \xi^2} \nonumber +\end{align} + +where +\\[ \bar{\omega}\_0^2 = \frac{k}{m};\quad \xi = \frac{c}{c\_0} = \frac{c}{2 \sqrt{km}} \\] + +This implies a modal solution of the form +\\[ x(t) = X e^{-\bar{\omega}\_0 \xi t} e^{i (\bar{\omega}\_0 \sqrt{1 - \xi^2})t} = X e^{-a t} e^{i \omega\_0^\prime t} \\] +which is a single mode of vibration with a complex natural frequency having two parts: + +- **An imaginary or oscillatory part** +- **A real or decay part** + +The physical significance of these two parts is illustrated in the typical free response plot shown in figure [fig:sdof_response](#fig:sdof_response) + + + +{{< figure src="/ox-hugo/ewins00_sdof_response.png" caption="Figure 5: Oscillatory and decay part" >}} + +Lastly, we consider the forced response when \\(f(t) = F e^{i\omega t}\\) and, as before, we assume \\(x(t) = Xe^{i\omega t}\\): +\\[ \left( -\omega^2 m + i \omega c + k \right) X e^{i\omega t} = F e^{i \omega t} \\] +gives a receptance FRF of the form + +
+
+ +\begin{equation} + \alpha(\omega) = \frac{1}{(k - \omega^2 m) + i \omega c} +\end{equation} + +which is now complex, containing both magnitude and phase information: + +\begin{subequations} + \begin{align} + \frac{|X|}{|F|} &= \frac{1}{\sqrt{(k - \omega^2 m)^2 + (\omega c)^2}} \\\\\\ + \angle{X} - \angle{F} &= \text{tg}^{-1} \left( \frac{\omega c}{k - \omega^2 m} \right) + \end{align} +\end{subequations} + +
+ + +#### Structural Damping {#structural-damping} + +All structures exhibit a degree of damping due to the **hysteresis properties** of the material(s) from which they are made. + +A typical example of this effect is shown in the force displacement plot in figure [fig:material_histeresis](#fig:material_histeresis) in which the **area contained by the loop represents the energy lost in one cycle of vibration** between the extremities shown. +The maximum energy stored corresponds to the elastic energy of the structure at the point of maximum deflection. +The damping effect of such a component can conveniently be defined by the ratio of these two: +\\[ \tcmbox{\text{damping capacity} = \frac{\text{energy lost per cycle}}{\text{maximum energy stored}}} \\] + + +
+ Table 1: + Force-deflection characteristics +
+
+| ![](/ox-hugo/ewins00_material_histeresis.png) | ![](/ox-hugo/ewins00_dry_friction.png) | ![](/ox-hugo/ewins00_viscous_damper.png) |
+|-----------------------------------------------|----------------------------------------|------------------------------------------|
+| Material hysteresis | Dry friction | Viscous damper |
+| height=2cm | height=2cm | height=2cm |
+
+Another common source of energy dissipation in practical structures is the **friction** which exists in joints between components of the structure.
+It may be described very roughly by the simple **dry friction model** shown in figure [fig:dry_friction](#fig:dry_friction).
+
+The mathematical model of the **viscous damper** which we have used can be compared with these more physical effects by plotting the corresponding force-displacement diagram for it, and this is shown in figure [fig:viscous_damper](#fig:viscous_damper).
+Because the relationship is linear between force and velocity, it is necessary to suppose harmonic motion, at frequency \\(\omega\\), in order to construct a force-displacement diagram.
+The resulting diagram shows the nature of the approximation provided by the viscous damper model and the concept of the **effective or equivalent viscous damping coefficient** for any of the actual phenomena as being that which provides the **same energy loss per cycle** as the real thing.
+
+
+ +The problem which arises with the **viscous damping model** is that it has a **frequency-dependence in the amount of energy loss per cycle** whereas the dry friction mechanism is clearly unaffected by the frequency of loading and experiments suggests that the hysteresis effect is similarly independent of frequency. +Thus, we find a problem in obtaining a single equivalent viscous dashpot model which will be **valid over a range of frequencies**, such as will be necessary to represent the damping of a MDOF system over all, or at least several, of its modes of vibration. + +
+ +An alternative theoretical damping model is provided by the **hysteretic** or **structural damper** which not only has the advantage that the **energy lost per cycle is independent of the frequency**, but also provides a much simpler analysis for MDOF systems. +However, it presents difficulties to a rigorous free vibration analysis and its application is generally focused on the forced response analysis. +In this case, we can write an equation of motion: +\\[ (-\omega^2 m + k + i d) X e^{i\omega t} = F e^{i \omega t} \\] + +
+
+ +\begin{equation} + \alpha(\omega) = \frac{1/k}{1 - \left(\omega/\bar{\omega}\_0\right)^2 + i \eta} +\end{equation} + +where \\(\eta\\) is the **structural damping loss factor** and replaces the critical damping ratio used for the viscous damping model. + +
+ + +### Presentation and Properties of FRF data for SDOF system {#presentation-and-properties-of-frf-data-for-sdof-system} + + +#### Alternative Forms of FRF {#alternative-forms-of-frf} + +So far we have defined our receptance frequency response function \\(\alpha(\omega)\\) as the ratio between a harmonic displacement response and the harmonic force [eq:receptance](#eq:receptance). +This ratio is complex: we can look at its **amplitude** ratio \\(|\alpha(\omega)|\\) and its **phase** angle \\(\theta\_\alpha(\omega)\\). + +We could have selected the response velocity \\(v(t)\\) as the output quantity and defined an alternative frequency response function [eq:mobility](#eq:mobility). +Similarly we could use the acceleration parameter so we could define a third FRF parameter [eq:inertance](#eq:inertance). + +
+
+ +\begin{equation} + \text{mobility} = Y(\omega) = \frac{V}{F} = i \omega \alpha(\omega) \label{eq:mobility} +\end{equation} + +
+ +
+
+ +\begin{equation} + \text{inertance} = A(\omega) = \frac{A}{F} = - \omega^2 \alpha(\omega) \label{eq:inertance} +\end{equation} + +
+ +Table [tab:frf_alternatives](#tab:frf_alternatives) gives details of all six of the FRF parameters and of the names used for them. + +**Inverse response** can also be defined. For instance, the **dynamic stiffness** is defined as the force over the displacement. + +
+
+ +\begin{equation} + \frac{\text{force}}{\text{displacement}} = (k - \omega^2 m) + (i \omega c) \label{eq:dynamic\_stiffness} +\end{equation} + +
+
+It should be noted that the use of displacement as the response is greatly encouraged as the other options can lead to confusion when used in MDOF systems.
+
+
+ Table 2: + Definition of Frequency Response Functions +
+ +| | **Standard FRF** | **Inverse FRF: FIR** | +|-----------|---------------------|----------------------| +| **Disp**. | Receptance | Dynamic Stiffness | +| | Admittance | | +| | Dynamic compliance | | +| | Dynamic flexibility | | +| **Vel**. | Mobility | Mechanical Impedance | +| **Acc**. | Accelerance | Apparent Mass | +| | Inertance | | + + +#### Graphical Displays of FRF Data {#graphical-displays-of-frf-data} + +FRF data are complex and thus there are three quantities (frequency and two parts of the complex function) to display. +Any simple plot can only show two of the three quantities and so there are different possibilities available for the presentation of such data: + +1. Modulus of FRF vs Frequency and Phase of FRF vs Frequency: the **Bode plot** +2. Real Part of FRF vs Frequency and Imaginary Part of FRF vs Frequency +3. Real Part of reciprocal FRF vs Frequency and Imaginary part of reciprocal FRF vs Frequency +4. Real part of FRF vs Imaginary part of FRF: the **Nyquist plot** + + +##### Bode Plot {#bode-plot} + +Bode plot are usually displayed using logarithmic scales as shown on figure [fig:bode_plots](#fig:bode_plots). + + +
+ Table 3: + FRF plots for undamped SDOF system +
+ +| ![](/ox-hugo/ewins00_bode_receptance.png) | ![](/ox-hugo/ewins00_bode_mobility.png) | ![](/ox-hugo/ewins00_bode_accelerance.png) | +|-------------------------------------------|-----------------------------------------|--------------------------------------------| +| Receptance FRF | Mobility FRF | Accelerance FRF | +| width=\linewidth | width=\linewidth | width=\linewidth | + +Each plot can be divided into three regimes: + +- a low frequency straight line characteristic +- a high frequency straight line characteristic +- the resonant region with its abrupt magnitude and phase variations + + +##### Real part and Imaginary part of FRF {#real-part-and-imaginary-part-of-frf} + +Real and imaginary part of a receptance FRF of a damped SDOF system is shown on figure [fig:plot_receptance_real_imag](#fig:plot_receptance_real_imag). +This type of display is not widely used as we cannot use logarithmic axes (as we have to show positive and negative values). + + +
+ Table 4: + Plot of real and imaginary part for the receptance of a damped SDOF +
+ +| ![](/ox-hugo/ewins00_plot_receptance_real.png) | ![](/ox-hugo/ewins00_plot_receptance_imag.png) | +|------------------------------------------------|------------------------------------------------| +| Real part | Imaginary part | +| width=\linewidth | width=\linewidth | + + +##### Real part and Imaginary part of reciprocal FRF {#real-part-and-imaginary-part-of-reciprocal-frf} + +It can be seen from the expression of the inverse receptance [eq:dynamic_stiffness](#eq:dynamic_stiffness) that the Real part depends entirely on the mass and stiffness properties while the Imaginary part is a only function of the damping. + +Figure [fig:inverse_frf_mixed](#fig:inverse_frf_mixed) shows an example of a plot of a system with a combination of both viscous and structural damping. The imaginary part is a straight line whose slope is given by the viscous damping rate \\(c\\) and whose intercept at \\(\omega = 0\\) is provided by the structural damping coefficient \\(d\\). + + +
+ Table 5: + Inverse FRF plot for the system +
+ +| ![](/ox-hugo/ewins00_inverse_frf_mixed.png) | ![](/ox-hugo/ewins00_inverse_frf_viscous.png) | +|---------------------------------------------|-----------------------------------------------| +| Mixed | Viscous | +| width=\linewidth | width=\linewidth | + + +##### Real part vs Imaginary part of FRF {#real-part-vs-imaginary-part-of-frf} + +Figure [fig:nyquist_receptance](#fig:nyquist_receptance) shows Nyquist type FRF plots of a viscously damped SDOF system. +The missing information (in this case, the frequency) must be added by identifying the values of frequency corresponding to particular points on the curve. + + +
+ Table 6: + Nyquist FRF plots of the mobility for a SDOF system +
+ +| ![](/ox-hugo/ewins00_nyquist_receptance_viscous.png) | ![](/ox-hugo/ewins00_nyquist_receptance_structural.png) | +|------------------------------------------------------|---------------------------------------------------------| +| Viscous damping | Structural damping | +| width=\linewidth | width=\linewidth | + +The Nyquist plot has the particularity of distorting the plot so as to focus on the resonance area. +This makes the Nyquist plot very effective for modal testing applications. + + +### Undamped MDOF Systems {#undamped-mdof-systems} + + +#### Free Vibration Solution - The modal Properties {#free-vibration-solution-the-modal-properties} + +For an undamped MDOF system, with \\(N\\) degrees of freedom, the governing equations of motion can be written in matrix form [eq:undamped_mdof](#eq:undamped_mdof). + +
+
+ +\begin{equation} + [M] \\{\ddot{x}(t)\\} + [K] \\{x(t)\\} = \\{f(t)\\} \label{eq:undamped\_mdof} +\end{equation} + +where \\([M]\\) and \\([K]\\) are \\(N\times N\\) mass and stiffness matrices, and \\(\\{x(t)\\}\\) and \\(\\{f(t)\\}\\) are \\(N\times 1\\) vectors of time-varying displacements and forces. + +
+ +We shall consider first the free vibration solution by taking \\(f(t) = 0\\). +In this case, we assume that a solution exists of the form \\(\\{x(t)\\} = \\{X\\} e^{i \omega t}\\) where \\(\\{X\\}\\) is an \\(N \times 1\\) vector of time-independent amplitudes. +Substitution of this condition into [eq:undamped_mdof](#eq:undamped_mdof) leads to + +\begin{equation} + \left( [K] - \omega^2 [M] \right) \\{X\\} e^{i\omega t} = \\{0\\} +\end{equation} + +for which the non trivial solutions are those which satisfy +\\[ \det \left| [K] - \omega^2 [M] \right| = 0 \\] +from which we can find \\(N\\) values of \\(\omega^2\\) corresponding to the undamped system's **natural frequencies**. + +Substituting any of these back into [eq:free_eom_mdof](#eq:free_eom_mdof) yields a corresponding set of relative values for \\(\\{X\\}\\): \\(\\{\psi\\}\_r\\) the so-called **mode shape** corresponding to that natural frequency. + +
+
+ +The complete solution can be expressed in two \\(N \times N\\) **eigen matrices**. + +\\[ \begin{bmatrix} + \omega\_1^2 & & 0 \\\\\\ + & \ddots & \\\\\\ + 0 & & \omega\_n^2 +\end{bmatrix}; \quad \Psi = \begin{bmatrix} + & & \\\\\\ + \\{\psi\_1\\} & \dots & \\{\psi\_n\\} \\\\\\ + & & +\end{bmatrix} \\] + +where \\(\bar{\omega}\_r^2\\) is the \\(r^\text{th}\\) **eigenvalue squared** and \\(\\{\psi\\}\_r\\) is a description of the corresponding **mode shape** + +
+ +Various numerical procedures are available which take the system matrices \\([M]\\) and \\([K]\\) (the **Spatial Model**), and convert them to the two eigen matrices \\([\bar{\omega}\_r^2]\\) and \\([\Psi]\\) which constitute the **Modal Model**. + +It is important to realize that whereas the eigenvalue matrix is unique, the eigenvector matrix is **not**. +Indeed, the natural frequencies are fixed quantities, and the mode shapes are subject to an **indeterminate scaling factor**. + + +#### Orthogonality Properties {#orthogonality-properties} + +
+
+ +The modal model possesses some very important properties known as the **orthogonality** properties: + +\begin{subequations} + \begin{align} + [\Psi]^T[M][\Psi] &= [m\_r]\\\\\\ + [\Psi]^T[K][\Psi] &= [k\_r] + \end{align} +\end{subequations} + +from which \\([\bar{\omega}\_r^2] = [m\_r]^{-1} [k\_r]\\) where \\(m\_r\\) and \\(k\_r\\) are often referred to as the **modal mass** and **modal stiffness** of mode \\(r\\). + +
+ +Now, because the eigenvector matrix is subject to an **arbitrary scaling factor**, the values of \\(m\_r\\) and \\(k\_r\\) are not unique. + +Among the many scaling or normalization processes, the **mass-normalization** has the most relevance. + +
+
+
+The mass-normalized eigenvectors are written as \\([\Phi]\\) and have the particular property that
+
+\begin{subequations}
+  \begin{align}
+    [\Phi]^T[M][\Phi] &= [I]\\\\\\
+    [\Phi]^T[K][\Phi] &= [\bar{\omega}\_r^2]
+  \end{align}
+\end{subequations}
+
+ +The relationship between the mass-normalized mode shape for mode \\(r\\) \\(\\{\Phi\\}\_r\\) and its more general form \\(\\{\Psi\\}\_r\\) is simply + +\begin{equation} + \\{\Phi\\}\_r = \frac{1}{\sqrt{m\_r}} \\{\Psi\\}\_r +\end{equation} + + +#### Modal, Generalized and Effective Mass and Stiffness {#modal-generalized-and-effective-mass-and-stiffness} + +The **modal mass** is based on the mode shape vector for mode \\(r\\) and the system mass matrix. +As mentioned, there is no unique value for the modal mass as it is directly related to the scaling method which has been used to determine the mode shape eigenvector \\(\\{\Psi\\}\_r\\). +However, the ratio between any modal stiffness and its associated modal mass is unique and is equal to the corresponding eigenvalue. +The modal mass is generally used to convert the original mode shape vector \\(\\{\Psi\\}\_r\\) to the more useful mass-normalized mode shape vector \\(\\{\Phi\\}\_r\\). + +
+
+
+Using the mass-normalized mode shape vectors, we can see how to derive quantities which provide us with information about the **effective mass** (or stiffness) at any point on the structure (any DOF \\(j\\)).
+The effective mass at DOF \\(j\\) for mode \\(r\\) is defined as
+\\[ (m\_{jj})\_r = \frac{1}{(\phi\_{jr})^2}, \text{ which has units of mass} \\]
+and the effective stiffness at DOF \\(j\\) for mode \\(r\\)
+\\[ (k\_{jj})\_r = \frac{\bar{\omega}\_r^2}{(\phi\_{jr}^2)} \\]
+
+ +It can be seen that since the mass-normalized eigenvectors are unique, these effective mass and stiffness properties are also unique and represent a useful description of the behavior of the structure point by point, and mode by mode. + +
+
+ +The other quantities which are sometimes referred to as unique properties of each mode are the **generalized mass** and **generalized stiffness**. +The generalized mass (or stiffness) of the \\(r^{\text{th}}\\) mode is defined as the effective mass (or stiffness) at the DOF with the largest amplitude of response. +This quantity serves to provide a comparison of the **relative strength of each mode of the structure**. + +
+ + +#### Repeated Roots or Multiple Modes {#repeated-roots-or-multiple-modes} + +There are situations where two (or more) different modes will have the **same natural frequency**. +This occurs frequently in structures which exhibit a degree of **symmetry** (especially axi-symmetry). +In these cases, there is no guarantee that the corresponding eigenvectors are orthogonal. +However, linear combinations of these vectors can always be found such that orthogonality is observed between the mode shapes. +It should be noted, that free vibration at that frequency is possible not only in each of the two vectors thus defined, but also in a deformation pattern which is given by **any linear combination** of these two vectors. + + +#### Force Response Solution - The FRF Characteristics {#force-response-solution-the-frf-characteristics} + +Let's consider the case where the structure is excited sinusoidally by a set of forces all at the same frequency \\(\omega\\), but with individual amplitudes and phases: \\(\\{f(t)\\} = \\{F\\} e^{i\omega t}\\). + +Assuming solutions of the form \\(\\{x(t)\\} = \\{X\\} e^{i \omega t}\\), equation of motion then becomes +\\[ \left( [K] - \omega^2 [M] \right) \\{X\\}e^{i\omega t} = \\{F\\}e^{i\omega t} \\] +That can be written in the following form: +\\[ \\{X\\} = \left( [K] - \omega^2 [M] \right)^{-1} \\{F\\} \\] + +
+
+ +We define the \\(N \times N\\) receptance FRF matrix as +\\[ [\alpha(\omega)] = \left( [K] - \omega^2 [M] \right)^{-1} \\] + +It constitutes the **Response Model** of the system. + +The general element in the receptance FRF matrix \\(\alpha\_{jk}(\omega)\\) is defined as follows +\\[ \alpha\_{jk}(\omega) = \frac{X\_j}{F\_k}, \quad F\_m = 0, m = 1 \dots N \neq k \\] + +
+ +It is possible to determine values for the elements of \\([\alpha(\omega)]\\) at any frequency of interest by computing the inverse system matrix \\([K] - \omega^2[M]\\) at each frequency. +This has several disadvantages: + +- it becomes costly for large-order systems +- it is inefficient if only a few of the individual FRF are required +- it provides no insight into the form of the various FRF properties + +An alternative means of deriving the FRF parameters is used which makes use of the **modal properties instead of the spatial properties**. +\\[ [K] - \omega^2 [M] = [\alpha(\omega)]^{-1} \\] +Pre-multiply both sides by \\([\Phi]^T\\) and post-multiply both sides by \\([\Phi]\\) to obtain +\\[ [\Phi]^T ([K] - \omega^2 [M]) [\Phi] = [\Phi]^T [\alpha(\omega)]^{-1} [\Phi] \\] +which leads to [eq:receptance_modal](#eq:receptance_modal). + +
+
+ +\begin{equation} + [\alpha(\omega)] = [\Phi] \left[ \bar{\omega}\_r^2 - \omega^2 \right]^{-1} [\Phi]^T \label{eq:receptance\_modal} +\end{equation} + +Equation [eq:receptance_modal](#eq:receptance_modal) permits us to compute any individual FRF parameters \\(\alpha\_{jk}(\omega)\\) using the following formula + +\begin{subequations} + \begin{align} + \alpha\_{jk}(\omega) &= \sum\_{r=1}^N \frac{\phi\_{jr} \phi\_{kr}}{\bar{\omega}\_r^2 - \omega^2}\\\\\\ + &= \sum\_{r=1}^N \frac{\psi\_{jr} \psi\_{kr}}{m\_r (\bar{\omega}\_r^2 - \omega^2)}\\\\\\ + &= \sum\_{r=1}^N \frac{{}\_rA\_{jk}}{\bar{\omega}\_r^2 - \omega^2} + \end{align} +\end{subequations} + +where \\({}\_rA\_{jk}\\) is called the **modal constant**. + +
+ +
+
+ +It is clear from equation [eq:receptance_modal](#eq:receptance_modal) that the receptance matrix \\([\alpha(\omega)]\\) is **symmetric** and this will be recognized as the **principle of reciprocity**. + +This principle of reciprocity applies to many structural characteristics. + +Its implications in this situation are that + +\begin{equation} + \alpha\_{jk} = X\_j/F\_k = \alpha\_{kj} = X\_k/F\_j \label{eq:principle\_reciprocity} +\end{equation} + +
+ + +### MDOF Systems with Proportional Damping {#mdof-systems-with-proportional-damping} + + +#### General Concept and Features of Proportional Damping {#general-concept-and-features-of-proportional-damping} + +The modes of a structure with proportional damping are almost identical to those of the undamped version of the model. +Specifically, the **mode shapes are identical** and the **natural frequencies are very similar**. + +The equations of motion for an MDOF system with viscous damping is + +\begin{equation} + [M]\\{\ddot{x}\\} + [C]\\{\dot{x}\\} + [K]\\{x\\} = \\{f\\} +\end{equation} + +Let's first study the special case where the damping matrix is directly **proportional to the stiffness matrix**: +\\[ [C] = \beta [K] \\] +In this case, we have that +\\[ [\Psi]^T [C] [\Psi] = \beta [k\_r] = [c\_r] \\] +where the diagonal elements \\(c\_{jj}\\) represent the **modal damping** of the various modes of the system. +The fact that this matrix is also diagonal means that the **undamped system mode shapes are also those of the damped system**, and this is a particular feature of this type of damping. + +For the forced response analysis, we obtain +\\[ [\alpha(\omega)] = [ K + i \omega C - \omega^2 M ]^{-1} \\] +or + +\begin{equation} + \alpha\_{jk}(\omega) = \sum\_{r=1}^N \frac{\psi\_{jr}\psi\_{kr}}{(k\_r - \omega^2 m\_r) + i \omega c\_r} +\end{equation} + + +#### General Forms of Proportional Damping {#general-forms-of-proportional-damping} + +If the damping matrix is proportional to the mass matrix, exactly the same type of result is obtained. 
+A usual definition of proportional damping is that the damping matrix \\([C]\\) should be of the form

+\begin{equation}
+  \tcmbox{[C] = \beta [K] + \gamma [M]}
+\end{equation}
+
+In this case, the damped system will have eigenvalues and eigenvectors as follows
+
+\begin{align\*}
+  &\omega\_r^\prime = \bar{\omega}\_r \sqrt{1 - \xi\_r^2} ; \quad \xi\_r = \frac{\beta \bar{\omega}\_r}{2} + \frac{\gamma}{2 \bar{\omega}\_r}\\\\\\
+  &[\Psi\_\text{damped}] = [\Psi\_\text{undamped}]
+\end{align\*}
+
+Distributions of damping of this type are sometimes, though not always, found to be plausible from a practical standpoint.
+The actual damping mechanisms are usually to be found in parallel with stiffness elements (for **internal material or hysteresis damping**) or with mass elements (for **friction damping**).
+
+Identical treatment can be made of an MDOF system with proportional **hysteretic damping**.
+If the general system equations of motion are expressed as
+\\[ [M]\\{\ddot{x}\\} + [K + iD]\\{x\\} = \\{f\\} \\]
+and the hysteretic damping matrix \\([D]\\) has the form
+
+\begin{equation}
+  [D] = \beta [K] + \gamma [M]
+\end{equation}
+
+then, we find that the mode shapes for the damped system are again identical to those of the undamped system and that the eigenvalues take the complex form:
+\\[ \lambda\_r^2 = \bar{\omega}\_r^2 (1 + i \eta\_r); \quad \bar{\omega}\_r^2 = \frac{k\_r}{m\_r}; \quad \eta\_r = \beta + \frac{\gamma}{\bar{\omega}\_r^2} \\]
+and the general FRF is written as
+
+\begin{equation}
+  \alpha\_{jk}(\omega) = \sum\_{r=1}^N \frac{\psi\_{jr}\psi\_{kr}}{(k\_r - \omega^2 m\_r) + i \eta\_r k\_r}
+\end{equation}
+
+
+### MDOF Systems with Structural (Hysteretic) Damping {#mdof-systems-with-structural--hysteretic--damping}
+
+
+#### Free Vibration Solution - Complex Modal Properties {#free-vibration-solution-complex-modal-properties}
+
+We start by writing the general equation of motion for an MDOF system with hysteretic damping and harmonic excitation:
+\\[ 
[M] \\{\ddot{x}\\} + [K]\\{x\\} +i[D]\\{x\\} = \\{F\\}e^{i\omega t} \\] + +We consider first the case where there is no excitation and assume a solution of the form +\\[ \\{x\\} = \\{X\\} e^{i \lambda t} \\] +where \\(\lambda\\) is complex. + +We then obtain **complex** eigenvalues and eigenvectors. + +We choose to write the \\(r^\text{th}\\) eigenvalue as +\\[ \lambda\_r^2 = \omega\_r^2(1 + i \eta\_r) \\] +where \\(\omega\_r\\) is the **natural frequency** and \\(\eta\_r\\) is the **damping loss factor** for that mode. +The natural frequency \\(\omega\_r\\) is not necessarily equal to the natural frequency of the undamped system \\(\bar{\omega}\_r\\) although they are very close in practice. + +The eigensolution can be seen to possess the same type of **orthogonality properties** as those demonstrated earlier for the undamped system: +\\[ [\Psi]^T[M][\Psi] = [m\_r] ; \quad [\Psi]^T[K + iD][\Psi] = [k\_r] \\] + +The modal mass and stiffness parameters are now complex but still obey the relationship +\\[ \lambda\_r^2 = \frac{k\_r}{m\_r} \\] +and we define the mass-normalized eigenvectors +\\[ \\{\phi\\}\_r = \frac{1}{\sqrt{m\_r}} \\{\psi\\}\_r \\] + + +#### Forced Response Solution - FRF Characteristics {#forced-response-solution-frf-characteristics} + +For the forced vibration analysis in the case of harmonic excitation and response, we obtain + +\begin{equation} + \\{X\\} = \left( [K] + i[D] - \omega^2[M] \right)^{-1} \\{F\\} +\end{equation} + +By multiplying both sides of the equation by the eigenvectors, we can write + +\begin{equation} + [\alpha(\omega)] = [\Phi] [\lambda\_r^2 - \omega^2]^{-1} [\Phi]^T +\end{equation} + +From this full matrix equation, we have: + +\begin{align\*} + \alpha\_{jk}(\omega) &= \sum\_{r=1}^N \frac{\phi\_{jr} \phi\_{kr}}{\omega\_r^2 - \omega^2 + i \eta\_r \omega\_r^2}\\\\\\ + &= \sum\_{r=1}^N \frac{\psi\_{jr} \psi\_{kr}}{m\_r\left(\omega\_r^2 - \omega^2 + i \eta\_r \omega\_r^2\right)}\\\\\\ + &= \sum\_{r=1}^N 
\frac{{}\_rA\_{jk}}{\omega\_r^2 - \omega^2 + i \eta\_r \omega\_r^2}\\\\\\
+\end{align\*}
+
+
+#### Excitation by a general Force Vector {#excitation-by-a-general-force-vector}
+
+
+##### Operating deflection shape (ODS) {#operating-deflection-shape--ods}
+
+Having derived an expression for the general term in the frequency response function matrix \\(\alpha\_{jk}(\omega)\\), it is appropriate to consider next the analysis of a situation where the system is **excited simultaneously at several points**.
+
+The general behavior for this case is governed by equation [eq:force_response_eom](#eq:force_response_eom) with solution [eq:force_response_eom_solution](#eq:force_response_eom_solution).
+However, a more explicit form of the solution is
+
+\begin{equation}
+  \\{X\\} = \sum\_{r=1}^N \frac{\\{\phi\\}\_r^T \\{F\\} \\{\phi\\}\_r}{\omega\_r^2 - \omega^2 + i \eta\_r \omega\_r^2}
+\end{equation}
+
+This equation permits the calculation of one or more individual **responses to an excitation of several simultaneous harmonic forces** (all of which must have the same frequency but may vary in magnitude and phase).
+The resulting vector of responses is sometimes referred to as **forced vibration mode**, or more commonly, as an **Operating Deflection Shape** (**ODS**).
+
+
+##### Pure mode excitation 1 - Damped system normal modes {#pure-mode-excitation-1-damped-system-normal-modes}
+
+It is possible to choose the vector of individual forces such that the response of the structure is entirely controlled by a **single normal mode** of the structure.
+
+
+
+The **normal modes** are the characteristic modes of the structure in its actual (damped) state.
+While it is possible to talk of the modes "that the structure would have if the damping could be removed", these are not the "normal" modes of the structures.
+The properties of the normal modes of the undamped system are of interest because in most cases of test-analysis comparison, the analytical model will be undamped and so there is a desire to be able to extract the test structure's "undamped" modes from the test data in order to do a direct comparison between prediction and measurement.
+
+ +We are seeking an excitation vector \\(\\{F\\}\\) such that the **response** \\(\\{X\\}\\) **consists of a single modal component** so that all terms in [eq:ods](#eq:ods) but one is zero. +This can be attained if \\(\\{F\\}\\) is chosen such that +\\[ \\{\phi\_r\\}^T \\{F\\}\_s = 0, \ r \neq s \\] + + +##### Pure mode excitation 2 - Associated undamped system normal modes {#pure-mode-excitation-2-associated-undamped-system-normal-modes} + +We here consider an excitation vector of **mono-phased forces**. +We basically impose that all forces have the same frequency and phase. +What is of interest in this case it to see that there exist conditions under which it is possible to obtain a **similarly mono-phase response**. + +Let the force and response vectors be represented by + +\begin{align\*} + \\{f\\} &= \\{\hat{F}\\} e^{i\omega t} \\\\\\ + \\{x\\} &= \\{\hat{X}\\} e^{i(\omega t - \theta)} +\end{align\*} + +where both \\(\\{\hat{F}\\}\\) and \\(\\{\hat{X}\\}\\) are vectors of real quantities. +Substituting these into the equation of motion leads to a complex equation which can be split into real and imaginary parts to give + +\begin{align\*} + \left( (-\omega^2 [M] + [K]) \cos \theta + [D] \sin \theta \right) \\{\hat{X}\\} &= \\{\hat{F}\\} \\\\\\ + \left( (-\omega^2 [M] + [K]) \sin \theta - [D] \cos \theta \right) \\{\hat{X}\\} &= \\{0\\} +\end{align\*} + +We can show that if we consider that the phase lag between all the forces and all the responses is exactly \\(\SI{90}{\degree}\\), the equation reduces to +\\[ (-\omega^2 [M] + [K])\\{\hat{X}\\} = 0 \\] +which is clearly the equation to be solved to find the undamped system natural frequencies and the mode shapes. 
+
+Thus we have the important result that it is always possible to find a set of mono-phased forces which will cause a mono-phased set of responses and, moreover, if these two sets are separated by exactly \\(\SI{90}{\degree}\\), then the frequency at which the system is vibrating is identical to one of its **undamped natural frequencies** and the displacement shape is the corresponding **undamped mode shape**.
+
+This most important result is the basis for many of the multi-shaker test procedures used to isolate the undamped modes of the structures for comparison with theoretical predictions.
+The physics of the technique are quite simple: the force vector is chosen so that it exactly balances all the damping forces.
+
+
+##### Postscript {#postscript}
+
+It is often observed that the analysis for hysteretic damping is less than rigorous when applied to the free vibration situation, as we have done above.
+However, it is an admissible model of damping for describing harmonic forced vibration and this is the objective of most of our studies.
+Moreover, it is always possible to express each of the receptance expressions either as a ratio of two polynomials or as a series of simple terms.
+Each of the terms in the series may be identified with one of the modes we have defined in the earlier free vibration analysis for the system.
+Thus, whether or not the solution is strictly valid for a free vibration analysis, we can usefully and confidently consider each of the uncoupled terms or modes as being a genuine characteristic of the system.
+As will be seen in the next section, the analysis required for the general case of viscous damping, which is more rigorous, is considerably more complicated than that used here which is, in effect, a very simple extension of the undamped case. 
+ + +### MDOF systems with Viscous Damping {#mdof-systems-with-viscous-damping} + + +#### Free vibration solution - Complex modal properties {#free-vibration-solution-complex-modal-properties} + +The general equation of motion for an MDOF system with viscous damping is + +\begin{equation} + [M] \\{\ddot{x}\\} + [C]\\{\dot{x}\\} + [K]\\{x\\} = \\{f\\} +\end{equation} + +We first consider the case with no excitation, and consider solutions of the form +\\[ \\{x\\} = \\{X\\} e^{st} \\] + +Substituting this into the equation of motion gives +\\[ \left( s^2 [M] + s[C] + [K] \right) \\{X\\} = \\{0\\} \\] + +There are now \\(2N\\) eigenvalues \\(s\_r\\) (as opposed to \\(N\\) values of \\(\lambda\_r^2\\) before) but these now occur in **complex conjugate pairs**. +The eigenvectors also occur as **complex conjugates**. +The eigensolution can thus be described as +\\[ s\_r, s\_r^\*, \quad \\{\psi\\}\_r, \\{\psi\\}\_r^\*;\quad r = 1, N \\] + +Each eigenvalue \\(s\_r\\) is expressed in the form +\\[ s\_r = \omega\_r \left( -\xi\_r + i\sqrt{1 - \xi\_r^2} \right) \\] +where \\(\omega\_r\\) is the **natural frequency** and \\(\xi\_r\\) is the **critical damping ratio** for that mode. 
+ +**Orthogonality equations** can also be derived: + +\begin{subequations} + \begin{align} + (s\_r + s\_q) \\{\psi\\}\_q^T [M] \\{\psi\\}\_r + \\{\psi\\}\_q^T [C] \\{\psi\\}\_r &= 0 \\\\\\ + s\_r s\_q \\{\psi\\}\_q^T [M] \\{\psi\\}\_r - \\{\psi\\}\_q^T [K] \\{\psi\\}\_r &= 0 + \end{align} +\end{subequations} + +When the modes \\(r\\) and \\(q\\) are a complex conjugate pair: +\\[ s\_r = \omega\_r \left( -\xi\_r - i\sqrt{1 - \xi\_r^2} \right); \quad \\{\psi\\}\_q = \\{\psi\\}\_r^\* \\] + +From equations [eq:viscous_damping_orthogonality](#eq:viscous_damping_orthogonality), we can obtain + +\begin{subequations} + \begin{align} + 2 \omega\_r \xi\_r &= \frac{\\{\psi\\}\_r^H [C] \\{\psi\\}\_r}{\\{\psi\\}\_r^H [M] \\{\psi\\}\_r} = \frac{c\_r}{m\_r} \\\\\\ + \omega\_r^2 &= \frac{\\{\psi\\}\_r^H [K] \\{\psi\\}\_r}{\\{\psi\\}\_r^H [M] \\{\psi\\}\_r} = \frac{k\_r}{m\_r} + \end{align} +\end{subequations} + + +#### Forced response solution {#forced-response-solution} + +Assuming a harmonic response \\(\\{x(t)\\} = \\{X\\} e^{i\omega t}\\), we can write the forced response solution directly as +\\[ \\{X\\} = \big( [K] - \omega^2 [M] + i\omega [C] \big)^{-1} \\{F\\} \\] +but this expression is not particularly convenient for numerical applications. + +We then seek a similar series expansion that was found for the undamped, proportionally-damped and hysteretically damped systems. 
+ +The obtain result is +\\[ \alpha\_{jk}(\omega) = \sum\_{r=1}^N \frac{({}\_rR\_{jk}) + i (\omega/\omega\_r)({}\_rS\_{jk})}{\omega\_r^2 - \omega^2 + 2 i \omega \omega\_r \xi\_r} \\] +where the coefficients \\(R\\) and \\(S\\) are obtained from: + +\begin{align\*} + \\{{}\_rR\_k\\} &= 2 \left( \xi\_r \text{Re}\\{{}\_rG\_k\\} - \text{Im}\\{{}\_rG\_k\\} \sqrt{1 - \xi\_r^2} \right)\\\\\\ + \\{{}\_rS\_k\\} &= 2 \text{Re}\\{{}\_rG\_k\\}\\\\\\ + \\{{}\_rG\_k\\} &= (\theta\_{kr}/a\_r) \\{\theta\\}\_r +\end{align\*} + +The main difference with the result obtained with the proportional damping is in the **frequency dependence of the numerator** in case of viscous damping. + + +### Complex Modes {#complex-modes} + + +#### Real and Complex modes, stationary and traveling waves {#real-and-complex-modes-stationary-and-traveling-waves} + +We saw that we can obtain complex eigenvalues whose real part represents the decay and imaginary part the oscillatory component. +We can also obtain **complex eigenvectors** which means that the **mode shapes are complex**. + +
+
+ +A **complex mode** is one in which each part of the structure has not only its own magnitude of vibration but also its **own phase**. +As a result, each part of a structure which is vibrating in a complex mode will **reach its own maximum deflection at a different instant in the vibration cycle** to that of its neighbors. + +
+ +
+
+
+A **real mode** is one in which the phase angles are all identically \\(\SI{0}{\degree}\\) or \\(\SI{180}{\degree}\\) and which thus has the property that all parts of the structure reach their own maxima at the same time.
+Equally, in a real mode, all parts of the structure pass through their **zero deflection position at the same instant** so that there are two moments in each vibration cycle when the structure is completely un-deformed.
+
+</div>
+ +While the real mode has the appearance of a **standing wave**, the complex mode is better described as exhibiting **traveling waves** (illustrated on figure [fig:real_complex_modes](#fig:real_complex_modes)). + + + +{{< figure src="/ox-hugo/ewins00_real_complex_modes.png" caption="Figure 6: Real and complex mode shapes displays" >}} + +Another method of displaying **modal complexity** is by plotting the elements of the eigenvector on an **Argand diagram**, such as the ones shown in figure [fig:argand_diagram](#fig:argand_diagram). +Note that the almost-real mode shape does not necessarily have vector elements with near \\(\SI{0}{\degree}\\) or near \\(\SI{180}{\degree}\\) phase, what matters are the **relative phases** between the different elements. + + +
+ Table 7: + Complex mode shapes plotted on Argand diagrams +
+ +| ![](/ox-hugo/ewins00_argand_diagram_a.png) | ![](/ox-hugo/ewins00_argand_diagram_b.png) | ![](/ox-hugo/ewins00_argand_diagram_c.png) | +|--------------------------------------------|--------------------------------------------|-----------------------------------------------| +| Almost-real mode | Complex Mode | Measure of complexity | +| width=\linewidth | width=\linewidth | width=\linewidth | + + +#### Measurement of modal complexity {#measurement-of-modal-complexity} + +There exist few indicators of the modal complexity. +The first one, a simple and crude one, called **MCF1** consists of summing all the phase differences between every combination of two eigenvector elements: +\\[ \text{MCF1} = \sum\_{j=1}^N \sum\_{k=1 \neq j}^N (\theta\_{rj} - \theta\_{rk}) \\] + +The second measure is shown on figure [fig:argand_diagram_c](#fig:argand_diagram_c) where a polygon is drawn around the extremities of the individual vectors. +The obtained area of this polygon is then compared with the area of the circle which is based on the length of the largest vector element. The resulting ratio is used as an indication of the complexity of the mode, and is defined as **MCF2**. + + +#### Origins of complex modes {#origins-of-complex-modes} + +Complex modes occur in practice for variety of **physical reasons** as well as **poor measurement or analysis**: + +- The types of modes which are referred to as **operating deflection shapes** will frequently exhibit the relative phases differences between responses of adjacent parts of the structure which indicate a complex mode. +- Complex normal modes can exist even in simple structures which contain rotating components that are prone to gyroscopic forces. +- However, normal modes of non-rotating linear structures can be complex only if the **damping is distributed in a non-proportional way**. 
+  This situation can arise quite readily in practice because while the internal (hysteresis) damping of most structural elements is distributed in a way which is essentially proportional to the stiffness distribution, the **majority of the damping** in real structures is generally found to be **concentrated at the joints** between components of a structural assembly and this does not usually result in a proportional distribution.
+- Another ingredient is found to be necessary to generate significant complexity in a structure's mode and that is the requirement that **two or more of its modes are close**.
+  Close modes are those whose natural frequencies are separated by an amount which is less than the prevailing damping in either or both modes.
+
+
+### Characteristics of MDOF FRF data {#characteristics-of-mdof-frf-data}
+
+
+#### A note about natural frequencies {#a-note-about-natural-frequencies}
+
+The basic definition derives from the **undamped system's eigenvalues** which yield the frequencies at which **free vibration of the system can take place**.
+These undamped system natural frequencies are given by the square roots of the eigenvalues and are identified by the symbol \\(\bar{\omega}\_r\\).
+This occurs in expressions for both free and forced vibration response:
+\\[ x(t) = \sum\_{r=1}^N x\_r e^{i\bar{\omega}\_r t}; \quad \alpha(\omega) = \sum\_{r=1}^N \frac{A\_r}{\bar{\omega}\_r^2 - \omega^2} \\]
+
+For **damped systems**, two alternative characteristic frequencies are defined:
+
+- \\(\omega\_r^\prime\\) for **free vibration**
+- \\(\omega\_r\\) for **forced vibration**
+
+The former constitutes the oscillatory part of the free vibration characteristic which, being **complex**, contains an exponential decay term as well:
+\\[ x(t) = \sum\_{r=1}^N x\_r e^{-a\_r t} e^{i \omega\_r^\prime t} \\]
+where \\(\omega\_r^\prime\\) may not be identical to \\(\bar{\omega}\_r\\) depending on the type and distribution of the damping.
+ +The second definition comes from the general form of the FRF expression: +\\[ \alpha(\omega) = \sum\_{r=1}^N \frac{C\_r}{\omega\_r^2 - \omega^2 + i D\_r} \\] +Here \\(C\_r\\) may be complex whereas \\(D\_r\\) is real. +\\(\omega\_r\\) is in general different to both \\(\bar{\omega}\_r\\) and \\(\omega\_r^\prime\\). + +Table [tab:frf_natural_frequencies](#tab:frf_natural_frequencies) summarizes all the different cases. + + +
+ Table 8: + FRF Formulae and Natural Frequencies +
+
+| **Case** | **C** | **D** | **Free** \\(\omega\_r^\prime\\) | **Forced** \\(\omega\_r\\) |
+|-------------|---------------------------------|---------------------------------|----------------------------------|----------------------------|
+| Undamped | \\(\mathbb{R}\\) | 0 | \\(\bar{\omega}\_r\\) | \\(\bar{\omega}\_r\\) |
+| Prop. Hyst. | \\(\mathbb{R}\\) | \\(\mathbb{R}\\) | \\(\bar{\omega}\_r\\) | \\(\bar{\omega}\_r\\) |
+| Prop. Visc. | \\(\mathbb{R}\\) | \\(\mathbb{R}\\) (\\(\omega\\)) | \\(\omega\_r\sqrt{1-\xi\_r^2}\\) | \\(\bar{\omega}\_r\\) |
+| Gen. Hyst. | \\(\mathbb{C}\\) | \\(\mathbb{R}\\) | \\(\omega\_r\\) | \\(\omega\_r\\) |
+| Gen. Visc. | \\(\mathbb{C}\\) (\\(\omega\\)) | \\(\mathbb{R}\\) (\\(\omega\\)) | \\(\omega\_r\sqrt{1-\xi\_r^2}\\) | \\(\omega\_r\\) |
+
+
+#### Mobility and Impedance FRF Parameters {#mobility-and-impedance-frf-parameters}
+
+As well as for SDOF systems, there are three main forms of FRF: using displacement (**receptance**), velocity (**mobility**) or acceleration (**inertance**) response.
+There exist a further three formats for FRF data, these being the **inverses** of the standard receptance, mobility and inertance: the **dynamic stiffness**, **mechanical impedance** and **apparent mass**, respectively.
+
+However, for MDOF systems it is usually **not feasible to measure the inverse properties**.
+In general, we can determine the response of a structure to an excitation using the equation
+\\[ \\{V\\} = [Y(\omega)] \\{F\\} \\]
+Equally, we can write the inverse equation using impedance instead of mobilities:
+\\[ \\{F\\} = [Z(\omega)] \\{V\\} \\]
+
+The problem arises because the general element in the mobility matrix \\(Y\_{ik}(\omega)\\) is not simply related to its counterpart in the impedance matrix \\(Z\_{ik}(\omega)\\) as was the case for SDOF systems (\\(Y\_{ik}(\omega) = Z\_{ik}^{-1}(\omega)\\)).
+
+The reason for this is that
+\\[ Y\_{kj}(\omega) = \left( \frac{V\_k}{F\_j} \right)\_{F\_l = 0}; \quad Z\_{jk}(\omega) = \left( \frac{F\_j}{V\_k} \right)\_{V\_l = 0}; \ l \neq k \\]
+
+Thus, the measurement of an impedance property demands that **all DOFs except one are grounded** which is almost impossible in practice.
+The only types of FRF which we can expect to measure directly are the mobilities.
+
+
+#### Display for Undamped System FRF data {#display-for-undamped-system-frf-data}
+
+
+##### Construction of FRF plots for 2DOF system {#construction-of-frf-plots-for-2dof-system}
+
+We can envisage the form which the total FRF curve will take as it is simply the summation of all the individual terms.
+However, the exact shape of the curve also depends on the **phase** of each term.
+When the addition of the various components is made to determine the complete receptance expression, the signs of the various terms are obviously of considerable importance.
+
+Let's consider an example with two modes.
+We write \\(\alpha\_{11}\\) the point FRF and \\(\alpha\_{21}\\) the transfer FRF:
+
+\begin{align\*}
+  \alpha\_{11}(\omega) &= \frac{0.5}{\omega\_1^2 - \omega^2} + \frac{0.5}{\omega\_2^2 - \omega^2} \\\\\\
+  \alpha\_{21}(\omega) &= \frac{0.5}{\omega\_1^2 - \omega^2} - \frac{0.5}{\omega\_2^2 - \omega^2}
+\end{align\*}
+
+It can be seen that the only difference between the point and transfer receptance is in the sign of the modal constant of the second mode.
+
+Consider the first point mobility (figure [fig:mobility_frf_mdof_point](#fig:mobility_frf_mdof_point)), between the two resonances, the two components have opposite signs so that they are subtractive rather than additive, and indeed, at the point where they cross, their sum is zero.
+On a logarithmic plot, this produces the antiresonance characteristic which reflects that of the resonance.
+
+
+ Table 9: + Mobility FRF plot for undamped 2DOF system +
+ +| ![](/ox-hugo/ewins00_mobility_frf_mdof_point.png) | ![](/ox-hugo/ewins00_mobility_frf_mdof_transfer.png) | +|---------------------------------------------------|------------------------------------------------------| +| Point FRF | Transfer FRF | +| width=\linewidth | width=\linewidth | + +For the plot in figure [fig:mobility_frf_mdof_transfer](#fig:mobility_frf_mdof_transfer), between the two resonances, the two components have the same sign and they add up, no antiresonance is present. + + +##### FRF modulus plots for MDOF systems {#frf-modulus-plots-for-mdof-systems} + +The same principle may be extended to any number of DOF. +The fundamental rule is that **if two consecutive modes have the same sign for the modal constants**, then there will be an **antiresonance** at some frequency between the natural frequency of the two modes. +If they have apposite signs, there will not be an antiresonance. + + +#### Display of FRF Data for Damped systems {#display-of-frf-data-for-damped-systems} + + +##### Bode plots {#bode-plots} + +The resonances and antiresonances are blunted by the inclusion of damping, and the phase angles are no longer exactly \\(\SI{0}{\degree}\\) or \\(\SI{180}{\degree}\\), but the general appearance of the plot is a natural extension of that for the system without damping. +Figure [fig:frf_damped_system](#fig:frf_damped_system) shows a plot for the same mobility as appears in figure [fig:mobility_frf_mdof_point](#fig:mobility_frf_mdof_point) but here for a system with added damping. + +Most mobility plots have this general form as long as the modes are relatively well-separated. + +This condition is satisfied unless the separation between adjacent natural frequencies is of the same order as, or less than, the modal damping factors, in which case it becomes difficult to distinguish the individual modes. 
+ + + +{{< figure src="/ox-hugo/ewins00_frf_damped_system.png" caption="Figure 7: Mobility plot of a damped system" >}} + + +##### Nyquist diagrams {#nyquist-diagrams} + +Each of the frequency response of a MDOF system in the Nyquist plot is composed of a number of SDOF components. + +Figure [fig:nyquist_point](#fig:nyquist_point) shows the result of plotting the point receptance \\(\alpha\_{11}\\) for the 2DOF system described above. + +The plot for the transfer receptance \\(\alpha\_{21}\\) is presented in figure [fig:nyquist_transfer](#fig:nyquist_transfer) where it may be seen that the opposing signs of the modal constants of the two modes have caused one of the modal circle to be in the upper half of the complex plane. + + +
+ Table 10: + Nyquist FRF plot for proportionally-damped system +
+ +| ![](/ox-hugo/ewins00_nyquist_point.png) | ![](/ox-hugo/ewins00_nyquist_transfer.png) | +|------------------------------------------|---------------------------------------------| +| Point receptance | Transfer receptance | +| width=\linewidth | width=\linewidth | + +In the two figures [fig:nyquist_nonpropdamp_point](#fig:nyquist_nonpropdamp_point) and [fig:nyquist_nonpropdamp_transfer](#fig:nyquist_nonpropdamp_transfer), we show corresponding data for **non-proportional** damping. +In this case, a relative phase has been introduced between the first and second elements of the eigenvectors: of \\(\SI{30}{\degree}\\) in mode 1 and of \\(\SI{150}{\degree}\\) in mode 2. +Now we find that the individual modal circles are no longer "upright" but are **rotated by an amount dictated by the complexity of the modal constants**. + + +
+ Table 11: + Nyquist FRF plot for non-proportionally-damped system +
+ +| ![](/ox-hugo/ewins00_nyquist_nonpropdamp_point.png) | ![](/ox-hugo/ewins00_nyquist_nonpropdamp_transfer.png) | +|-----------------------------------------------------|--------------------------------------------------------| +| Point receptance | Transfer receptance | +| width=\linewidth | width=\linewidth | + + +### Non-Sinusoidal vibration and FRF properties {#non-sinusoidal-vibration-and-frf-properties} + +With receptance and other FRF data, we have a means of computing the response of a MDOF system to an excitation which consists of a set of harmonic forces of different amplitudes and phases but all of the same frequency. +In the general case we can simply write +\\[ \\{X\\} e^{i\omega t} = [\alpha(\omega)] \\{F\\} e^{i\omega t} \\] + + +#### Periodic vibration {#periodic-vibration} + + +##### Periodic signals as Fourier series {#periodic-signals-as-fourier-series} + +Let's first consider the case of periodic vibration, in which the excitation (and thus the response) is not simply sinusoidal although it has a property of periodicity. + +The easiest way of computing the responses in such a case is by mean of the **Fourier Series**. 
+
+The basic principle of Fourier analysis is that any periodic function can be represented by a series of sinusoids of suitable frequencies, amplitudes and phases based on the fundamental period \\(T\\):
+
+\begin{equation}
+  \tcmbox{f\_0(t) = \sum\_{n=1}^\infty {}\_0F\_n e^{i\omega\_n t}; \quad \omega\_n = \frac{2 \pi n}{T}}
+\end{equation}
+
+Once a frequency decomposition of the forcing function has been obtained, we may use the corresponding FRF data, computed at the specific frequencies present in the forcing spectrum, in order to compute the corresponding frequency components of the responses of interest:
+
+\begin{equation}
+  \tcmbox{x\_j(t) = \sum\_{n=1}^\infty \alpha\_{j0}(\omega\_n) {}\_0F\_ne^{i\omega\_n t}; \quad \omega\_n = \frac{2 \pi n}{T}}
+\end{equation}
+
+
+##### To derive FRF from periodic vibration signals {#to-derive-frf-from-periodic-vibration-signals}
+
+It is possible to determine a system's FRF properties from excitation and response measurements when the vibration is periodic.
+To do this, it is necessary to determine the **Fourier Series** components of **both the input force** signal and of the relevant **output response** signal.
+Both these series will contain components at the same set of discrete frequencies; these being integer multiples of \\(2\pi/T\\).
+
+Once these two series are available, the FRF can be defined at the same set of frequency points by computing the **ratio of the response component to the input component**.
+
+
+#### Transient vibration {#transient-vibration}
+
+
+##### Analysis via Fourier transform {#analysis-via-fourier-transform}
+
+For most transient cases, the input function \\(f(t)\\) will satisfy the **Dirichlet condition** and so its Fourier Transform \\(F(\omega)\\) can be computed from [eq:fourier_transform](#eq:fourier_transform).
+
+\begin{equation}
+  F(\omega) = \frac{1}{2 \pi} \int\_{-\infty}^\infty f(t) e^{-i\omega t} dt
+\end{equation}
+
+Now, at any frequency \\(\omega\\), the corresponding Fourier Transform of the response \\(X(\omega)\\) can be determined from
+
+\begin{equation}
+  \tcmbox{X(\omega) = H(\omega) F(\omega)}
+\end{equation}
+
+where \\(H(\omega)\\) represents the appropriate version of the FRF for the particular input and output parameters considered.
+
+We may then derive an expression for the response itself \\(x(t)\\) from the **Inverse Fourier Transform** of \\(X(\omega)\\)
+
+\begin{equation}
+  \tcmbox{x(t) = \int\_{-\infty}^\infty \left(H(\omega) F(\omega)\right)e^{i\omega t}d\omega}
+\end{equation}
+
+
+##### Response via time domain {#response-via-time-domain}
+
+This alternative analysis is referred to as **convolution** and is based on the ability to compute the response of a system to a simple unit impulse.
+
+Let's consider a unit impulse excitation applied at \\(t = t^\prime\\) with infinite magnitude and that lasts for an infinitesimal period of time although the **area underneath it is equal to unity**.
+The response of a system to such an excitation at \\(t>t^\prime\\) is defined as the system's unit **Impulse Response Function** (**IRF**) and has a direct relationship to the Frequency Response Function.
+
+The IRF is written as
+\\[ h(t - t^\prime) \\]
+
+If we now consider a more general transient excitation, we see that it is possible to represent this as the **superposition of several impulses**, each of magnitude \\(f(t^\prime)dt^\prime\\) and occurring at different instants in time.
+The response of a system at time \\(t\\) to just one of these incremental impulses at time \\(t^\prime\\) is
+\\[ \delta x (t) = h(t - t^\prime) f(t^\prime) dt^\prime \\]
+and the total response of the system will be given by superimposing or integrating all the incremental responses as follows
+\\[ x(t) = \int\_{-\infty}^\infty h(t-t^\prime) f(t^\prime) dt^\prime; \quad h(t-t^\prime) = 0, \ t \leq t^\prime \\]
+
+There is a very close relationship between \\(H(\omega)\\) and \\(h(t - t^\prime)\\).
+Let's use the Fourier Transform approach to compute the response of a system to a unit impulse.
+Thus, let \\(f(t) = \delta(t)\\) and determine its Fourier Transform \\(F(\omega)\\):
+\\[ F(\omega) = \frac{1}{2 \pi} \int\_{-\infty}^\infty \delta(t) e^{-i\omega t} dt = \frac{1}{2\pi} \\]
+Then
+\\[ x(t) = \frac{1}{2\pi} \int\_{-\infty}^{\infty} H(\omega) e^{i\omega t} d\omega \triangleq h(t) \\]
+
+Thus, we find that the **Impulse and Frequency Response Functions constitute a Fourier Transform pair**.
+
+
+##### To derive FRF from transient vibration signals {#to-derive-frf-from-transient-vibration-signals}
+
+In order to obtain the structure's FRF properties using a transient vibration test, the calculation of the Fourier transforms of both the excitation and the response signals is required.
+Then, the ratio of these two functions can be computed
+
+\begin{equation}
+  H(\omega) = \frac{X(\omega)}{F(\omega)}
+\end{equation}
+
+This can be done provided that both the excitation and response signals are effectively zero at the start and the end of the measurement sample.
+
+
+#### Random vibration {#random-vibration}
+
+
+##### Random signals in time and frequency domains {#random-signals-in-time-and-frequency-domains}
+
+We here consider both excitation and response described by random processes.
+Neither excitation nor response signals can be subjected to a valid Fourier Transform calculation as they violate the Dirichlet condition.
+ +It is necessary to introduce the **Correlation Function** and the **Spectral Densities**. + +
+
+ +The **Autocorrelation Function** \\(R\_{ff}(\tau)\\) of a random vibration parameter \\(f(t)\\), is defined as the expected value of the product \\(f(t) f(t + \tau)\\) computed along the time axis. +This will always be a **real and even function of time**, and is written + +\begin{equation} + R\_{ff}(\tau) = E[f(t) f(t + \tau)] \label{eq:autocorrelation} +\end{equation} + +
+ +This correlation function, unlike the original quantity \\(f(t)\\) does satisfy the requirements for Fourier transformation and thus we can obtain its Fourier Transform by the usual equation. + +The resulting parameter we shall call a **Spectral Density**, in this case the **Auto** or **Power Spectral Density** (PSD) \\(S\_{ff}(\omega)\\). + +
+
+ +\begin{equation} + S\_{ff}(\omega) = \frac{1}{2\pi} \int\_{-\infty}^\infty R\_{ff}(\tau) e^{-i \omega \tau} d\tau \label{eq:powerspectraldensity} +\end{equation} + +
+ +The Spectral Density is a real and even function of frequency, and does in fact provides a description of the frequency composition of the original function \\(f(t)\\). +It has units of \\(f^2/\omega\\). + +Examples of random signals, autocorrelation function and power spectral density are shown on figure [fig:random_signals](#fig:random_signals). + + +
+ Table 12: + Random signals +
+ +| ![](/ox-hugo/ewins00_random_time.png) | ![](/ox-hugo/ewins00_random_autocorrelation.png) | ![](/ox-hugo/ewins00_random_psd.png) | +|---------------------------------------|--------------------------------------------------|------------------------------------------------| +| Time history | Autocorrelation Function | Power Spectral Density | +| width=\linewidth | width=\linewidth | width=\linewidth | + +A similar concept can be applied to a pair of functions such as \\(f(t)\\) and \\(x(t)\\) to produce **cross correlation** and **cross spectral density** functions. + +
+
+ +The cross correlation function \\(R\_{xf}(\tau)\\) between functions \\(f(t)\\) and \\(x(t)\\) is defined as + +\begin{equation} + R\_{xf}(\tau) = E[x(t) f(t + \tau)] \label{eq:crosscorelation} +\end{equation} + +
+ +
+
+ +The Cross Spectral Density (CSD) is defined as the Fourier Transform of the Cross Correlation function: + +\begin{equation} + S\_{xf}(\omega) = \frac{1}{2\pi} \int\_{-\infty}^\infty R\_{xf}(\tau) e^{-i \omega \tau} d\tau \label{eq:cross\_spectral\_density} +\end{equation} + +
+
+Cross correlation functions are real, but not always even, functions of time, and cross spectral densities, unlike auto spectral densities, are generally complex functions of frequency with the particular **conjugate property** that
+\\[ S\_{xf}(\omega) = S\_{fx}^\*(\omega) \\]
+
+The analysis to obtain the input/output relationships for systems undergoing random vibrations is based on the general excitation/response relationship in the time domain:
+\\[ x(t) = \int\_{-\infty}^\infty h(t - t^\prime) f(t^\prime) dt^\prime \\]
+
+Using this property, it is possible to derive an expression for \\(x(t)\\) and for \\(x(t - \tau)\\) and thus to calculate the response autocorrelation \\(R\_{xx}(\tau)\\)
+\\[ R\_{xx}(\tau) = E[x(t) x(t + \tau)] \\]
+
+This equation can be manipulated to describe the response autocorrelation in terms of the corresponding property of the excitation \\(R\_{ff}\\), but the result is complicated.
+However, the same equation can be transformed into the frequency domain
+
+\begin{equation}
+  \tcmbox{ S\_{xx}(\omega) = \left| H(\omega) \right|^2 S\_{ff}(\omega) }
+\end{equation}
+
+Although very convenient, equation [eq:psd_input_output](#eq:psd_input_output) does not provide a complete description of the random vibration conditions.
+Further, it is clear that **it could not be used to determine the FRF** from measurement of excitation and response because it **contains only the modulus** of \\(H(\omega)\\), the phase information being omitted from this formula.
+
+A second equation is required and this may be obtained by a similar analysis, two alternative formulas can be obtained [eq:cross_relation_alternatives](#eq:cross_relation_alternatives).
+
+
+
+ +\begin{subequations} + \begin{align} + S\_{fx}(\omega) &= H(\omega) S\_{ff}(\omega) \\\\\\ + S\_{xx}(\omega) &= H(\omega) S\_{xf}(\omega) + \end{align} +\end{subequations} + +
+
+
+##### To derive FRF from random vibration signals {#to-derive-frf-from-random-vibration-signals}
+
+The pair of equations [eq:cross_relation_alternatives](#eq:cross_relation_alternatives) provides the basis for determining a system's FRF properties from the measurements and analysis of a random vibration test.
+Using either of them, we have a simple formula for determining the FRF from estimates of the relevant spectral densities [eq:frf_estimates_spectral_densities](#eq:frf_estimates_spectral_densities).
+
+
+
+ +\begin{subequations} + \begin{align} + H(\omega) &= \frac{S\_{fx}(\omega)}{S\_{ff}(\omega)} = H\_1(\omega)\label{eq:H1} \\\\\\ + H(\omega) &= \frac{S\_{xx}(\omega)}{S\_{xf}(\omega)} = H\_2(\omega)\label{eq:H2} + \end{align} +\end{subequations} + +
+ +The existence of two equations presents an opportunity to **check the quality** of calculations made using measured data. + + +##### Instrumental variable model for FRF {#instrumental-variable-model-for-frf} + +There are difficulties to implement some of the above formulae in practice because of noise and other limitations concerned with the data acquisition and processing. + +One technique involves **three quantities**, rather than two, in the definition of the output/input ratio. +The system considered can best be described with reference to figure [fig:frf_determination](#fig:frf_determination) which shows first in [fig:frf_siso_model](#fig:frf_siso_model) the traditional single-input single-output model upon which the previous formulae are based. +Then in [fig:frf_feedback_model](#fig:frf_feedback_model) is given a more detailed and representative model of the system which is used in a modal test. + + +
+ Table 13: + System for FRF determination +
+ +| ![](/ox-hugo/ewins00_frf_siso_model.png) | ![](/ox-hugo/ewins00_frf_feedback_model.png) | +|------------------------------------------|--------------------------------------------------| +| Basic SISO model | SISO model with feedback | +| width=\linewidth | width=\linewidth | + +In this configuration, it can be seen that there are two feedback mechanisms which apply. +We then introduce an alternative formula which is available for the determination of the system FRF from measurements of the input and output quantities [eq:H3](#eq:H3). + +
+
+
+\begin{equation}
+  H(\omega) = \frac{S\_{x^\prime v}(\omega)}{S\_{f^\prime v}(\omega)} = H\_3(\omega)
+\end{equation}
+
+where \\(v\\) is a third signal in the system.
+
+</div>
+ + +##### Derivation of FRF from MIMO data {#derivation-of-frf-from-mimo-data} + +A diagram for the general n-input case is shown in figure [fig:frf_mimo](#fig:frf_mimo). + +We obtain two alternative formulas: + +\begin{subequations} + \begin{align} + \left[ H\_{xf}(\omega) \right]\_{n \times n} &= \left[ S\_{x^\prime v}(\omega) \right]\_{n \times n} \left[ S\_{f^\prime v}(\omega) \right]\_{n \times n}^{-1} \\\\\\ + \left[ H\_{xf}(\omega) \right]\_{n \times n} &= \left[ S\_{f^\prime f^\prime}(\omega) \right]\_{n \times n}^{-1} \left[ S\_{x^\prime f^\prime}(\omega) \right]\_{n \times n} + \end{align} +\end{subequations} + +In practical application of both of these formulae, care must be taken to ensure the non-singularity of the spectral density matrix which is to be inverted, and it is in this respect that the former version may be found to be more reliable. + + + +{{< figure src="/ox-hugo/ewins00_frf_mimo.png" caption="Figure 8: System for FRF determination via MIMO model" >}} + + +### Complete and Incomplete models {#complete-and-incomplete-models} + + +#### Some definitions {#some-definitions} + +Most of the preceding theory has been concerned with complete models; that is, the analysis has been presented for an \\(N\\) degree-of-freedom system with the implicit assumption that all the mass, stiffness and damping properties are known and that all the elements in the eigenmatrices and the FRF matrix are available. +While this is a valid approach for a theoretical study, it is less generally applicable for experimentally-based investigations where it is **not usually possible to measure all the DOFs**, or to examine all the modes possessed by a structure. +Because of this limitation, it is necessary to extend our analysis to examine the **implications of having access to something less than a complete set of data**, or model, and this leads us to the concept of a **reduced** or **incomplete** type of model. + +
+
+ +There are **different types of incomplete models**: + +1. There is the model which is reduced in size (from \\(N\\) to \\(n\\)) by simply **deleting information about certain degrees-of-freedom**. + This process leads to a reduced model which retains **full accuracy for the DOFs which are retained**, but which **looses access to those which have been deleted**. + The process can be **applied only to the modal and response models** and results in a modal model described by an \\(N\times N\\) eigenvalue matrix but by an eigenvector matrix which is only \\(n\times N\\). + The corresponding response model is an incomplete FRF matrix of size \\(n \times n\\), although all the elements of that reduced matrix are themselves fully accurate. +2. Another type of reduced model is one in which the **number of modes is reduced as well** (from \\(N\\) to \\(m\\)), so that the eigenvalue matrix is only \\(m \times m\\) in size. + A consequence of this is that the elements in the reduced \\(n \times n\\) FRF matrix in this case are only **approximate**. +3. Another type of model reduction can be achieved by **condensation** from \\(N\\) to \\(n\\) DOFs. + This is a process in which a number of DOFs are again eliminated from the complete description but an attempt is made to include the effects of the masses and stiffnesses which are thereby eliminated in the retained DOFs. + This is the **condensation process** which is applied in the **Guyan** and other **reduction techniques** used to contain the size of otherwise very large finite element models. + In such a condensed model, the spatial, modal and response models are all reduced to \\(n \times n\\) matrices, and it must be noted that the properties of each are approximate in every respect. + +
+
+
+#### Incomplete Response models {#incomplete-response-models}
+
+There are two ways in which a model can be incomplete: by the **omission of some modes**, and/or by the **omission of some degrees-of-freedom**.
+
+
+##### Omission of some DOFs {#omission-of-some-dofs}
+
+Consider first the complete FRF matrix which is \\(N \times N\\):
+\\[ \left[ H(\omega) \right]\_{N \times N} \\]
+and then suppose that we decide to limit our description of the system to **include certain DOFs only**.
+Our reduced response model is now
+\\[ \left[ H^R(\omega) \right]\_{n \times n} \\]
+
+Now it is clear that we have not altered the basic system, and it still has the same number of degrees-of-freedom even though we have **foregone our ability to describe the system's behavior at all of them**.
+In this case, the elements which remain in the reduced FRF matrix are **identical** to the corresponding elements in the full \\(N \times N\\) matrix.
+
+At this point, it is appropriate to mention the consequences of this type of reduction on the impedance type of FRF data.
+The impedance matrix which corresponds to the reduced model defined by \\([H^R]\\) will be denoted as \\([Z^R]\\) and it is clear that
+\\[ [Z^R(\omega)] = [H^R(\omega)]^{-1} \\]
+
+It is also clear that the elements in the reduced impedance matrix such as \\(Z\_{jk}^R\\) are **not** the same quantities as the corresponding elements in the full impedance matrix, and indeed, a completely different impedance matrix applies to each specific reduction:
+\\[ H\_{ij}^R(\omega) = H\_{ij}(\omega); \quad Z\_{ij}^R(\omega) \neq Z\_{ij}(\omega) \\]
+
+We can also consider the implications of this form of reduction on the other types of model, namely the modal model and the spatial model.
+For the **modal model**, elimination of the data pertaining to some of the DOFs results in a smaller eigenvector matrix, which then becomes rectangular of order \\(n \times N\\).
+The corresponding eigenvalue matrix is still \\(N \times N\\) because we still have all \\(N\\) modes included. + +For the **spatial model**, it is more difficult to effect a reduction of this type. +It is clearly not realistic simply to remove the rows and columns corresponding to eliminated DOFs from the mass and stiffness matrices as this would represent a drastic change to the system. +It is possible, however, to reduce these spatial matrices by a number of methods which have the effect of redistributing the mass and stiffness properties which relate to the redundant DOFs among those which are retained. +In this way, the total mass of the structure, and its correct stiffness properties can be largely retained. +The **Guyan reduction procedure** is perhaps the best known of this type. +Such reduced spatial properties will be denoted as +\\[ \left[M^R\right], \ \left[K^R\right] \\] + + +##### Omission of some modes {#omission-of-some-modes} + +Let's consider the other form of reduction in which only \\(m\\) of the \\(N\\) modes of the system are included. +Frequently, this is a necessary approach in that **many of the high-frequency modes will be of little interest** and almost certainly very difficult to measure. 
+Consider first the FRF matrix and include initially **all** the DOFs but suppose that each element in the matrix is computed using only \\(m\\) of the \\(N\\) terms in the summation +\\[ \tilde{H}\_{jk}(\omega) = \sum\_{r=1}^{m\leq N} \frac{{{}\_rA\_{jk}}}{\omega\_r^2 - \omega^2 + i \eta\_r \omega\_r^2} \\] +In full, we can write the FRF matrix as +\\[ \left[\tilde{H}(\omega)\right]\_{N\times N} = \left[\Phi\right]\_{N \times m} \left[\lambda\_r^2 - \omega^2\right]\_{m \times m}^{-1} \left[\Phi\right]\_{m \times N}^T \\] + + +##### Combination of both reduction {#combination-of-both-reduction} + +Of course, both types of reduction can be combined, the resulting matrix obtained would be +\\[ \left[ \hat{H}^R(\omega) \right]\_{n \times n} \\] +However, \\([\hat{H}^R(\omega)]\\) will in general be rank deficient, and thus is will not be possible to obtain the impedance matrix by numerical inversion. +In order to overcome this problem, it is often convenient to add a constant or **residual term** to each FRF: +\\[ [H(\omega)] = [\hat{H}(\omega)] + [R] \\] + + +#### Incomplete modal and spatial models {#incomplete-modal-and-spatial-models} + +It has been shown that the **orthogonality properties** of the modal model provide a direct **link between the modal and the spatial model**: +\\[ [\Phi]^T [M] [\Phi] = [I]; \quad [\Phi]^T [K] [\Phi] = [\omega\_r^2] \\] +Which can be inverted to yield + +\begin{subequations} + \begin{align} + [M] &= [\Phi]^{-T}[\Phi]^{-1}\\\\\\ + [K] &= [\Phi]^{-T}[\omega\_r^2][\Phi]^{-1} + \end{align} +\end{subequations} + +If the modal model is incomplete, then we can note the implications for the orthogonality properties. + +First, if we have a **modal incompleteness** (\\(m +
+ +The sensitivity of a model describes the rates of change of some of the key properties, such as the natural frequencies and mode shapes, with small changes in some of the modal parameters, such as individual masses or stiffnesses. + + + +The model sensitivities are required for various purposes: + +- they help to locate errors in models +- they are useful in guiding design optimization procedures +- they are used in the course of curve-fitting for the purposes of testing the **reliability** of the modal analysis processes + + +#### Modal sensitivities {#modal-sensitivities} + +The most commonly used sensitivities are those which describe the **rates of change of the modal parameters with the individual mass and stiffness elements in the spatial model**. +These quantities are defined as follows: +\\[ \frac{\partial \omega\_r}{\partial p} \text{ and } \frac{\partial \\{\phi\\}\_r}{\partial p} \\] +where \\(p\\) represents any variable of interest + + +##### SDOF system {#sdof-system} + +It is useful to approach the general expressions for these parameters via a simple example based on an undamped SDOF system. +We can introduce the concept of sensitivity through the basic SDOF system comprising mass \\(m\\) and spring \\(k\\). +We can define the basic sensitivities of the system's natural frequency \\(\omega\_0\\): +\\[ \frac{\partial \omega\_0}{\partial m} \text{ and } \frac{\partial \omega\_0}{\partial k} \\] + +We can show that: +\\[ \frac{\partial \omega\_0^2}{\partial m} = \frac{-k}{m^2} ; \quad \frac{\partial \omega\_0}{\partial k} = \frac{1}{2 \sqrt{km}} \\] + + +##### MDOF systems - eigenvalue sensitivity {#mdof-systems-eigenvalue-sensitivity} + +We can differentiate the following equation of motion of a MDOF system with respect to an arbitrary variable \\(p\\) that might be an individual mass \\(m\_i\\) or stiffness \\(k\_j\\). 
+\\[ \left( [K] - \omega\_r^2 [M] \right) \\{\phi\\}\_r = \\{0\\} \\] + +We then obtain +\\[ \frac{\partial \omega\_r^2}{\partial p} = \\{\phi\\}\_r^T \left( \frac{\partial [K]}{\partial p} - \omega\_r^2 \frac{\partial [M]}{\partial p} \right) \\{\phi\\}\_r \\] + + +##### MDOF systems - eigenvector sensitivity {#mdof-systems-eigenvector-sensitivity} + +A similar analysis can be made for the eigenvector sensitivity terms. + + +#### FRF sensitivities {#frf-sensitivities} + +It may be seen that it is also possible to derive **FRF sensitivities**. + +If we consider first the simple SDOF system with a receptance FRF \\(\alpha(\omega)\\) +\\[ \alpha(\omega) = \frac{1}{k + i \omega c - \omega^2 m} \\] +We can differentiate this with respect to \\(m\\) and \\(k\\). + +The same can be done with the more general case for \\(MDOF\\) system as follows: +\\[ \frac{\partial [\alpha(\omega)]}{\partial p} = [\alpha(\omega)] \left( \frac{\partial [K]}{\partial p} + i \omega \frac{\partial [C]}{\partial p} - \omega^2\frac{\partial [M]}{\partial p} \right) [\alpha(\omega)] \\] + + +#### Modal sensitivities from FRF data {#modal-sensitivities-from-frf-data} + +There exists the possibility of deriving certain sensitivity parameters directly from FRF data such as can be measured in a modal test. +Essentially, it is possible to derive expressions for the **eigenvalue sensitivities to selected individual mass and stiffness parameters** by analyzing the point FRF properties at the selected DOFs. 
+ + +## FRF Measurement Techniques {#frf-measurement-techniques} + + +### Introduction and Test Planning {#introduction-and-test-planning} + + +#### Introduction {#introduction} + +There are **two types of vibration measurement**: + +- those in which just the response level is measured +- those in which both input and response output parameters are measured + +Recalling the basic relationship: +\\[ \tcmbox{ \text{response} = \text{properties} \times \text{input} } \\] + +We can see that **only** when two of the three terms in this equation have been measured, we can define completely what is going on in the vibration of the test object. +If we measure only the response, then we are unable to say whether a particularly large response level is due to a strong excitation or to a resonance of the structure. + +For the second type of vibration measurement, both the excitation and the response are measured **simultaneously** so that the basic equation can be used to deduce the system properties. + +Our interest will first be on the **mobility measurements** or **FRF measurements** where the **excitation is applied at a single point**. +In that case, FRF data are directly obtained by "dividing" the measured responses by the measured excitation force. + +Responses obtained using **several simultaneous excitations** yield **Operating Deflection Shapes** (ODSs) from which it is necessary to extract the required FRF data by sometimes complicated analysis procedures. +These are referred to as **MIMO tests**. + + +#### Test Planning {#test-planning} + +It is clear that there will need to be an extensive test planning phase before full-scale measurements are made and decisions taken concerning the methods of excitation, signal processing and data analysis, as well as the proper selection of which data to measure, where to excite the structure and how to prepare and support it for those measurements. 
+ + +#### Checking the quality of the measured data {#checking-the-quality-of-the-measured-data} + + +##### Signal Quality {#signal-quality} + +It is sometimes found that the dynamic range of the measured quantities is extreme, especially when the frequency range being covered is wide. +What often happens is that there is a very large component of signal in one frequency range that dictates the gain settings on amplifiers and analysers such that lower-level components are difficult to measure accurately. + + +##### Signal fidelity {#signal-fidelity} + +This arises when the signals obtained do not truly represent the quantity which is to be measured. +For example, large motion perpendicular to the measurement axis can contaminate the measurement and give misleading indications. + +One should verify that the labeling and the connection of transducers are correct. +This can be checked by looking at the pattern of resonances visible on the FRF curves: + +- for excitation and response at the same DOF, resonances and anti-resonances must alternate +- excitation and response points which are well separated on the test structure will tend to possess fewer anti-resonances + + +##### Measurement repeatability {#measurement-repeatability} + +One essential check for any modal test is the repeatability of the measurements. +Certain FRF should be re-measured from time to time, just to check that neither the structure nor the measurement system have experienced any significant changes. + + +##### Measurement reliability {#measurement-reliability} + +We here seek to establish that the measured data are independent of the measuring system. +One should measure the same quantity (usually an FRF) with a slightly different setup, or procedure such as a different excitation signal. +These checks are very important to demonstrate the underlying validity of the measurement method being used. 
+ + +##### Measured data consistency, including reciprocity {#measured-data-consistency-including-reciprocity} + +The various FRF data measured on a given structure should exhibit consistency, by which is meant that the underlying natural frequencies, damping factors and mode shapes visible in the FRF data must all derive from a common modal model. + +The reciprocity expected to exist between FRFs such as \\(H\_{jk}\\) and \\(H\_{kj}\\) should be checked and found to be at an acceptable level. + + +### Basic Measurement System {#basic-measurement-system} + +The experimental setup used for mobility measurement contains three major items: + +1. **An excitation mechanism**. This contains a source for the excitation signal (sinusoidal, periodic, random, transient), a power amplifier and an exciter (usually a shaker or a hammer) +2. **A transduction system**. For the most part, piezoelectric transducers are used, although lasers and strain gauges are convenient because of their minimal interference with the test object. Conditioning amplifiers are used depending on the transducer used +3. **An analyzer** + +A typical layout for the measurement system is shown on figure [fig:general_frf_measurement_setup](#fig:general_frf_measurement_setup). + + + +{{< figure src="/ox-hugo/ewins00_general_frf_measurement_setup.png" caption="Figure 9: General layout of FRF measurement system" >}} + + +### Structure preparation {#structure-preparation} + + +##### Free supports {#free-supports} + +By "free" is meant that the test object is not attached to ground at any of its coordinates and is, in effect, freely suspended in space. +In this condition, the structure will exhibit rigid body modes which are determined solely by its mass and inertia properties and in which there is no bending or flexing at all. +Six rigid body modes are then obtained with a natural frequency of \\(\SI{0}{Hz}\\). +Mass and inertial properties can then be measured with such a support. 
+ +However, in practice it is not feasible to provide a truly free support. +An approximation to the free condition can be achieved by supporting the testpiece on very soft springs such that the frequencies of the rigid body modes are less than \\(\SI{10}{\%}\\) of that of the lowest resonance frequency. + + +##### Grounded supports {#grounded-supports} + +The other type of support is referred to as "grounded" because it attempts to fix selected points on the structure to ground. +In practice, it is very difficult to attach the test structure to a base structure which is sufficiently rigid to provide the necessary grounding. + +The safest procedure is to measure the mobility FRF of the base structure itself over the frequency range for the test and to establish that this is a much lower mobility than the corresponding levels for the test structure at the point of attachment. +If this condition is satisfied for all the coordinates to be grounded, then the base structure can reasonably be assumed to be grounded. + + +##### Loaded boundaries {#loaded-boundaries} + +A compromise procedure can be applied in which the test object is connected at certain coordinates to another simple component of known mobility, such as a specific mass. +The effects of the added component is then removed analytically. + + +### Excitation of the structure {#excitation-of-the-structure} + +Devices for exciting the structure can be divided into two types: + +- **Contacting**: these involves the connection of an exciter of some form which remains attached to the structure throughout the test. +- **Non-contacting**: devices which are either out of contact throughout the vibration (such as provided by a voice coil) or which are only in contact for a short period (such as a hammer) + +Exciters are often limited at very low frequencies by the stroke rather than by the force generated. 
+ + +#### Electromagnetic Exciters {#electromagnetic-exciters} + +The most common type of exciter is the electromagnetic shaker in which a magnetic force is applied on the structure without any physical contact. + +The frequency and amplitude of the excitation are controlled independently of each other, which gives flexibility. + +However, we need a direct measurement of the force applied to the structure (we cannot rely on the current going through the coil). + +The shakers are usually stiff in the orthogonal directions to the excitation. +This can modify the response of the system in those directions. +In order to avoid that, a drive rod which is stiff in one direction and flexible in the other five directions is attached between the shaker and the structure as shown on figure [fig:shaker_rod](#fig:shaker_rod). +Typical sizes for the rod are \\(5\\) to \\(\SI{10}{mm}\\) long and \\(\SI{1}{mm}\\) in diameter, if the rod is longer, it may introduce the effect of its own resonances. + + + +{{< figure src="/ox-hugo/ewins00_shaker_rod.png" caption="Figure 10: Exciter attachment and drive rod assembly" >}} + +The support of the shaker is also of primary importance. + +The setup shown on figure [fig:shaker_mount_1](#fig:shaker_mount_1) presents the most satisfactory arrangement in which the shaker is fixed to ground while the test structure is supported by a soft spring. + +Figure [fig:shaker_mount_2](#fig:shaker_mount_2) shows an alternative configuration in which the shaker itself is supported. +It may be necessary to add an additional inertia mass to the shaker in order to generate sufficient excitation forces at low frequencies. + +Figure [fig:shaker_mount_3](#fig:shaker_mount_3) shows an unsatisfactory setup. Indeed, the response measured at \\(A\\) would not be due solely to force applied at \\(B\\), but would also be caused by the forces applied at \\(C\\). + + +
+ Table 14: + Various mounting arrangement of exciter +
+ +| ![](/ox-hugo/ewins00_shaker_mount_1.png) | ![](/ox-hugo/ewins00_shaker_mount_2.png) | ![](/ox-hugo/ewins00_shaker_mount_3.png) | +|---------------------------------------------|-------------------------------------------------|------------------------------------------| +| Ideal Configuration | Suspended Configuration | Unsatisfactory | +| width=\linewidth | width=\linewidth | width=\linewidth | + + +#### Hammer or Impactor Excitation {#hammer-or-impactor-excitation} + +Although this type of test places greater demands on the analysis phase of the measurement process, it is a relatively simple means of exciting the structure. + +A set of different **tips** and **heads** are used to extend the frequency and force level ranges for testing a variety of different structures. +A **load cell** (or force transducer) which detects the magnitude of the force felt by the impactor is included. + +The magnitude of the impact is determined by the mass of the hammer head and its velocity when it hits the structure. + +The frequency range which is effectively excited is controlled by the stiffness of the contacting surface and the mass of the impactor head: there is a resonance at a frequency given by \\(\sqrt{\frac{\text{contact stiffness}}{\text{impactor mass}}}\\) above which it is difficult to deliver energy into the test structure. + +When the hammer tip impacts the test structure, this will experience a force pulse as shown on figure [fig:hammer_impulse](#fig:hammer_impulse). +A pulse of this type (half-sine shape) has a frequency content of the form illustrated on figure [fig:hammer_impulse](#fig:hammer_impulse). + + + +{{< figure src="/ox-hugo/ewins00_hammer_impulse.png" caption="Figure 11: Typical impact force pulse and spectrum" >}} + +The stiffer the materials, the shorter will be the duration of the pulse and the higher will be the frequency range covered by the impact. +Similarly, the lighter the impactor mass, the higher the effective frequency range. 
+ +Generally, as soft a tip as possible will be used in order to inject all the input energy into the frequency range of interest: using a stiffer tip than necessary will result in energy being input to vibrations outside the range of interest at the expense of those inside of that range. + +One of the difficulties of applying excitation using a hammer is ensuring that **each impact is essentially the same** as the previous ones, not so much in magnitude as in **position** and **orientation** relative to the normal of the surface. + + +### Transducers and Amplifiers {#transducers-and-amplifiers} + +The piezoelectric type of transducer is by far the most popular and widely used transducer in modal tests. + +Three types of piezoelectric transducers are available for mobility measurements: + +- **Force gauges** +- **Accelerometers** +- **Impedance heads**: simply a combination of force and acceleration sensitive elements in a single unit + +The basic principle of operation makes use of the fact that an element of piezoelectric material generates an electric charge across its end faces when subjected to a mechanical stress. +By suitable design, such a material may be incorporated into a device which **induces** in it a **stress proportional to the physical quantity to be measured**. + + +#### Force Transducers {#force-transducers} + +The force transducer is the simplest type of piezoelectric transducer. +The transmitted force \\(F\\) is applied directly across the crystal, which thus generates a corresponding charge \\(q\\), proportional to \\(F\\) (figure [fig:piezo_force_transducer](#fig:piezo_force_transducer)). + + + +{{< figure src="/ox-hugo/ewins00_piezo_force_transducer.png" caption="Figure 12: Force transducer" >}} + +There exists an undesirable possibility of a cross sensitivity, i.e. an electrical output when there is zero force \\(F\\) but, say, a transverse or shear loading. 
+ + +#### Accelerometers {#accelerometers} + +In an accelerometer, transduction is indirect and is achieved using a seismic mass (figure [fig:piezo_accelerometer](#fig:piezo_accelerometer)). +In this configuration, the force exerted on the crystals is the inertia force of the seismic mass (\\(m\ddot{z}\\)). +Thus, so long as the body and the seismic mass move together, the output of the transducer will be proportional to the acceleration of its body \\(x\\). + + + +{{< figure src="/ox-hugo/ewins00_piezo_accelerometer.png" caption="Figure 13: Compression-type of piezoelectric accelerometer" >}} + +Analysis of a simple dynamical model for this device shows that the ratio \\(\ddot{x}/\ddot{z}\\) is effectively unity over a wide range of frequency from zero upwards until the first resonant frequency of the transducer. + +There is also a problem of cross or transverse sensitivity of accelerometers which can result from imperfections in the crystal geometry and from interaction through the casing. + + +#### Selection of accelerometers {#selection-of-accelerometers} + +Accelerometer sensitivities vary between \\(1\\) and \\(\SI{10000}{pC/g}\\). +In general, we require as high a sensitivity as possible, however, the heavier the transducer, the lower is the transducer's resonant frequency and thus the maximum working frequency. +For accurate measurements, especially on complex structures which are liable to vibrate simultaneously in several directions, transducers with low transverse sensitivity (less than \\(\SI{1}{\%}\\)) should be selected. + + +#### Conditioning Amplifiers {#conditioning-amplifiers} + +One of the advantages of the piezoelectric transducer is that it is an active device and does not require a power supply in order to function. +However, this means that it cannot measure truly static quantities and so there is a **low frequency limit** below which measurements are not practical. 
+This limit is usually determined not simply by the properties of the transducer itself, but also by those of the amplifiers used to boost the small electric charge that is generated by the crystals. + +Two types of amplifier are available for this role that both have very high input impedance: + +- **Voltage amplifiers** +- **Charge amplifiers** + +Voltage amplifiers tend to be simpler and to have a better signal/noise characteristic than charge amplifiers. +However, they cannot be used at such low frequencies as the charge amplifiers and the overall gain is affected by the length and properties of the transducer cable whereas that for a charge amplifier it is effectively independent of the cable. + + +#### Attachment of transducers {#attachment-of-transducers} + +The correct installation of transducers, especially accelerometers is important. + +There are various means of fixing the transducers to the surface of the test structure, some more convenient than others. +Some of these methods are illustrated in figure [fig:transducer_mounting_types](#fig:transducer_mounting_types). + +Shown on figure [fig:transducer_mounting_response](#fig:transducer_mounting_response) are typical high frequency limits for each type of attachment. + + +
+ Table 15: + Accelerometer attachment characteristics +
+ +| ![](/ox-hugo/ewins00_transducer_mounting_types.png) | ![](/ox-hugo/ewins00_transducer_mounting_response.png) | +|-----------------------------------------------------|------------------------------------------------------------| +| Attachment methods | Frequency response characteristics | +| width=\linewidth | width=\linewidth | + + +#### Location of transducers {#location-of-transducers} + +Another problem which may require the removal of the transducer to another location is the possibility that it is **positioned at or very close to a node** of one or more of the structure's modes. +In that case, it will be very difficult to make an effective measurement of that particular mode. + +Most modal tests require a **point mobility measurement** as one of the measured FRF. +This is hard to achieve as both force and response transducer should be at the same point on the structure. +Three possibilities exist: + +1. Use an **impedance head** +2. Place the force and acceleration transducers **in line** but on opposite sides of the structure +3. Place the accelerometer **alongside**, as close as possible to the force gauge + +The third option is the most practical but is the one that presents some problems. +Particular care is required to ensure that the measurement is really representative of a point mobility: the accelerometer should be as close as possible to the force gauge. + + +### Digital Signal Processing {#digital-signal-processing} + + +#### Objective {#objective} + +The task of the spectrum analyser is to **estimate the Fourier transform** or **Spectral densities** of signals. + +We here relate the two most relevant versions of the fundamental Fourier transformation between the time and frequency domains. 
+ +In its simplest form, this states that a function \\(x(t)\\), periodic in time \\(T\\), can be written as an infinite series: + +\begin{equation} + x(t) = \frac{a\_0}{2} + \sum\_{n=1}^\infty \left( a\_n \cos\frac{2 \pi n t}{T} + b\_n \sin\frac{2 \pi n t}{T} \right) +\end{equation} + +where \\(a\_i\\) and \\(b\_i\\) can be computed from knowledge of \\(x(t)\\) via the relationships: + +\begin{subequations} + \begin{align} + a\_n &= \frac{2}{T} \int\_0^T x(t) \cos \left(\frac{2 \pi n t }{T} \right) \\\\\\ + b\_n &= \frac{2}{T} \int\_0^T x(t) \sin \left(\frac{2 \pi n t }{T} \right) + \end{align} +\end{subequations} + +In the situation where \\(x(t)\\) is discretised and of finite duration, so that it is defined only at a set of \\(N\\) particular values of time (\\(t\_k; k = 1, \dots, N\\)), we can write a finite Fourier series for \\(k = 1, \dots, N\\): + +\begin{align\*} + x\_k &= x(t\_k) \\\\\\ + &= \frac{a\_0}{2} + \sum\_{n=1}^{N/2} \left( a\_n \cos\left(\frac{2 \pi n t\_k}{T}\right) + b\_n \sin\left(\frac{2 \pi n t\_k}{T}\right) \right) +\end{align\*} + +The coefficients \\(a\_i\\) and \\(b\_i\\) are the Fourier or Spectral coefficients for the function \\(x(t)\\) and they are often displayed in **modulus and phase form**: + +\begin{subequations} + \begin{align} + x\_n &= \sqrt{a\_n^2 + b\_n^2} \\\\\\ + \phi\_n &= \tan^{-1} \left( \frac{-b\_n}{a\_n} \right) + \end{align} +\end{subequations} + + +#### Basics of the DFT {#basics-of-the-dft} + +In each case, the input signal is digitized and recorded as a set of \\(N\\) discrete values, evenly spaced in the period \\(T\\) during which the measurement is made. + +There is a basic relationship between the sample length \\(T\\), the number of discrete values \\(N\\), the sampling rate \\(\omega\_s\\) and the range and resolution of the frequency spectrum. 
+ +The range of the spectrum is \\([0, \omega\_\text{max}]\\) (\\(\omega\_\text{max}\\) is called the Nyquist frequency), and the resolution of lines in the spectrum is \\(\Delta\_\omega\\): + +\begin{subequations} + \begin{align} + \omega\_{\text{max}} &= \frac{\omega\_s}{2} = \frac{1}{2} \left( \frac{2\pi N}{T} \right) \\\\\\ + \Delta\_\omega &= \frac{\omega\_s}{N} = \frac{2\pi}{T} + \end{align} +\end{subequations} + +Various algorithms can be used to determine the spectral composition of the sampled signal, however, the most widely used is the **Fast Fourier Transform**. +That however requires \\(N\\) to be an integral power of \\(2\\). + + +#### Aliasing {#aliasing} + +Aliasing originates from the discretisation of the originally continuous time history. +With this discretisation process, the **existence of very high frequencies in the original signal may well be misinterpreted if the sampling rate is too slow**. +These high frequencies will be **indistinguishable** from genuine low frequency components as shown on figure [fig:aliasing](#fig:aliasing). + + + +{{< figure src="/ox-hugo/ewins00_aliasing.png" caption="Figure 14: The phenomenon of aliasing. On top: Low-frequency signal, On the bottom: High frequency signal" >}} + +A signal of frequency \\(\omega\\) and one of frequency \\(\omega\_s-\omega\\) are indistinguishable and this causes a **distortion of the spectrum** measured via the DFT. + +As a result, the part of the signal which has frequency components above \\(\omega\_s/2\\) will appear reflected or **aliased** in the range \\([0, \omega\_s/2]\\). +This is illustrated on figure [fig:effect_aliasing](#fig:effect_aliasing). + + +
+ Table 16: + Alias distortion of spectrum by DFT +
+ +| ![](/ox-hugo/ewins00_aliasing_no_distortion.png) | ![](/ox-hugo/ewins00_aliasing_distortion.png) | +|--------------------------------------------------|-----------------------------------------------------| +| True spectrum of signal | Indicated spectrum from DFT | +| width=\linewidth | width=\linewidth | + +The solution of the problem is to use an **anti-aliasing filter** which subjects the original time signal to a low-pass, sharp cut-off filter. +Because the filters used are inevitably less than perfect, and have a finite cut-off rate, it remains necessary to reject the spectral measurement in a frequency range approaching the Nyquist frequency \\(\omega\_s/2\\). +Typically, the cut-off rate is set to \\(0.5 \times \omega\_s/2\\) for simple filters and \\(0.8 \times \omega\_s/2\\) for more advance filters. +As a result, frequencies near \\(\omega\_s/2\\) may still be contaminated by the imperfect anti-aliasing. + + +#### Leakage {#leakage} + +Leakage is a problem which is a direct **consequence of the need to take only a finite length of time history coupled with the assumption of periodicity**. + + +
+ Table 17: + Sample length and leakage of spectrum +
+ +| ![](/ox-hugo/ewins00_leakage_ok.png) | ![](/ox-hugo/ewins00_leakage_nok.png) | +|--------------------------------------|----------------------------------------| +| Ideal signal | Awkward signal | +| width=\linewidth | width=\linewidth | + +The problem is illustrated on figure [fig:leakage](#fig:leakage). +In the first case (figure [fig:leakage_ok](#fig:leakage_ok)), the signal is perfectly periodic and the resulting spectrum is just a single line at the frequency of the sine wave. +In the second case (figure [fig:leakage_nok](#fig:leakage_nok)), the periodicity assumption is not strictly valid as there is a discontinuity at each end of the sample. +As a result, the spectrum produced for this case does not indicate the single frequency which the original time signal possessed. +Energy has "leaked" into a number of the spectral lines close to the true frequency and the spectrum is spread over several lines. + +Leakage is a serious problem in many applications, **ways of avoiding its effects** are: + +- Changing the duration of the measurement sample length to **match the periodicity of the signal**. + This can only work if the signal is periodic and if the period can be determined +- **Increasing the duration of the measurement** period \\(T\\) so that the separation between the spectral lines (the frequency resolution) is finer. + Although this will not totally remove the leakage effect +- **Adding zeroes to the end of the measured sample** ("zero padding"), thereby partially achieving the preceding result but without requiring more data +- Modifying the sampled signal obtained in such a way as to reduce the severity of the leakage effect. This effect is referred to as **windowing** + + +#### Windowing {#windowing} + +Windowing involves the imposition of a prescribed profile on the time signal prior to performing the Fourier transform. 
+ +The profiles, or "windows" are generally depicted as a time function \\(w(t)\\) as shown in figure [fig:windowing_examples](#fig:windowing_examples). + + + +{{< figure src="/ox-hugo/ewins00_windowing_examples.png" caption="Figure 15: Different types of window. (a) Boxcar, (b) Hanning, (c) Cosine-taper, (d) Exponential" >}} + +The analyzed signal is then \\(x^\prime(t) = x(t) w(t)\\). +The result of using a window is seen in the third column of figure [fig:windowing_examples](#fig:windowing_examples). + +The **Hanning and Cosine Taper windows are typically used for continuous signals**, such as are produced by steady periodic or random vibration, while the **Exponential window is used for transient vibration** applications where much of the important information is concentrated in the initial part of the time record. + +In all cases, a **re-scaling** is required to compensate for the attenuation of the signals by the application of the window. +However, **if both response and excitation signals are subjected to the same window**, and the results are used only to compute an FRF ratio, then the **re-scaling is not necessary**. + + +#### Filtering {#filtering} + +The process of filtering has a direct parallel with windowing. +Common filters are: low-pass, high-pass, band-limited, narrow-band, notch. + + +#### Improving Resolution {#improving-resolution} + + + + +##### Increasing transform size {#increasing-transform-size} + +An immediate solution to this problem would be to use a larger transform. +However, this may not be possible in practice. + + +##### Zero padding {#zero-padding} + +It may be possible to achieve the same resolution increase by adding a series of zeros to the short sample of the actual signal. +Care must be taken in such a procedure as it will smooth the resulting spectrum but no additional information is included. +This can be misleading in some cases. +For instance where two peaks are close and result in only one peak in the smoothed data. 
+ + +##### Zoom {#zoom} + +The common solution to the need for finer frequency resolution is to zoom on the frequency range of interest and to concentrate all the spectral lines into a narrow band between \\(\omega\_\text{min}\\) and \\(\omega\_\text{max}\\). + +There are various ways of achieving this result. +The easiest way is to use a frequency shifting process coupled with a controlled aliasing device. + +Suppose the signal to be analyzed \\(x(t)\\) has a spectrum \\(X(\omega)\\) has shown on figure [fig:zoom_range](#fig:zoom_range), and that we are interested in a detailed analysis between \\(\omega\_1\\) and \\(\omega\_2\\). + +If we apply a band-pass filter to the signal, as shown on figure [fig:zoom_bandpass](#fig:zoom_bandpass), and perform a DFT between \\(0\\) and \\((\omega\_2 - \omega\_1)\\), then because of the aliasing phenomenon described earlier, the frequency components between \\(\omega\_1\\) and \\(\omega\_2\\) will appear between \\(0\\) and \\((\omega\_2 - \omega\_1)\\) with the advantage of a finer resolution (see figure [fig:zoom_result](#fig:zoom_result)). + + +
+ Table 18: + Controlled aliasing for frequency zoom +
+ +| ![](/ox-hugo/ewins00_zoom_range.png) | ![](/ox-hugo/ewins00_zoom_bandpass.png) | +|------------------------------------------------|------------------------------------------| +| Spectrum of the signal | Band-pass filter | +| width=\linewidth | width=\linewidth | + + + +{{< figure src="/ox-hugo/ewins00_zoom_result.png" caption="Figure 16: Effective frequency translation for zoom" >}} + +When using zoom the measure FRF in a narrow frequency range, it is important to ensure that there is as little vibration energy as possible outside the frequency range of interest. + + +#### Averaging {#averaging} + +When analyzing **random** vibrations signals, it is not sufficient to compute Fourier transforms (strictly, they do not exist for a random process) and we must instead obtain estimates for the **spectral densities and correlation functions** which are used to characterize this type of signal. + +Although these properties are computed from the Fourier transforms, there are additional considerations concerning their accuracy and statistical reliability which must be given due attention. + +Generally, it is necessary to perform an averaging process, involving several individual time records, or samples, before a result is obtained which can be used with confidence. +The two major considerations which determine the **number of average required** are: + +- the statistical reliability +- the removal of spurious random noise from the signals + +An indication of the requirements from a statistical standpoint may be provided by the "**statistical degrees of freedom**" \\(\kappa\\) which is provided by + +\begin{equation} + \kappa = 2 B T\_t +\end{equation} + +where \\(B\\) is the frequency bandwidth and \\(T\_t\\) is the total time encompassing all data. +\\(T\_t = mT\\) for \\(m\\) samples each of \\(T\\) duration. + +As a guide, this quantity \\(\kappa\\) should be a minimum of \\(10\\) and should approach \\(100\\) for reasonably reliable estimates. 
+
+Another way to average is to apply the DFT on overlapping data.
+This is called **overlap averaging**.
+It is clear that \\(100\\) averages performed in this way cannot have the same statistical properties as would \\(100\\) completely independent samples.
+Nevertheless, the procedure is more effective than if all the data points are only used once.
+This manifests by producing smoother spectra.
+
+
+### Use of different excitation signals {#use-of-different-excitation-signals}
+
+There are three different classes of signals used for the excitation signals:
+
+- **Periodic**: stepped sine, slow sine sweep, periodic, pseudo random, periodic random
+- **Transient**: burst sine, burst random, chirp, impulse
+- **Random**: true random, white noise, narrow-band random
+
+All of these are in widespread use, each having its own particular merits and drawbacks.
+
+
+#### Stepped-Sine testing {#stepped-sine-testing}
+
+Stepped-sine testing comes from the classical method of measuring the FRF where a discrete sinusoidal signal with a fixed amplitude and frequency is used.
+
+In order to encompass a frequency range of interest, the command signal frequency is stepped from one discrete value to another in such a way as to provide the necessary density of points in the FRF plot.
+In this technique, it is necessary to ensure that **steady-state** conditions have been attained before the measurements are made and this means delaying the start of the measurement process for a short while after a new frequency has been selected as there will be a transient response.
+The extent of the unwanted transient response will depend on: + +- the proximity of the excitation frequency to the natural frequency of the structure +- the abruptness of the changeover from the previous command signal to the new one +- the lightness of the damping of the nearby structural modes + +In practice, this is only in the vicinity of a lightly damped resonance that the necessary delay becomes significant and extra attention is needed. + +One of the advantages is the facility of taking measurements where it is required. +For instance, the typical FRF curve has large region of relatively slow changes of level with frequency (away from resonances and anti-resonances) and in these regions it is sufficient to take measurements at relatively widely spaced frequency points. + + +#### Slow Sine Sweep testing {#slow-sine-sweep-testing} + +This is the traditional method of FRF measurement and involves the use of a sweep oscillator to provide a sinusoidal command signal with a frequency that varies slowly in the range of interest. +It is necessary to check that progress through the frequency range is sufficiently slow to check that steady-state response conditions are attained. +If excessive sweep rate is used, then distortions of the FRF plot are introduced as shown on figure [fig:sweep_distortions](#fig:sweep_distortions). + + + +{{< figure src="/ox-hugo/ewins00_sweep_distortions.png" caption="Figure 17: FRF measurements by sine sweep test" >}} + +One way of checking the suitability of a sweep rate is to make the measurement twice, once sweeping up and the second time sweeping down through the frequency range. +If both curves obtained are the same, the sweep rate is not excessive. + + +#### Periodic Excitation {#periodic-excitation} + +This is very similar to a sine wave test methods, however the input signal contains not one but many frequencies of interest. 
+
+The method of computing the FRF is quite simple: the discrete Fourier transform is computed for both the force and response signals and the **ratio of these transforms gives the FRF**.
+
+Two types of periodic signals are used:
+
+- **Deterministic**: all the components are mixed with ordered amplitude and phase relationships (e.g. a square wave)
+- **Pseudo-random**: generation of a random mixture of amplitudes and phases for the various frequency components
+
+The sequence is generated for a duration which equals the period of one sample in the analysis process, and is output repeatedly for several successive cycles.
+A particular advantage of this type of excitation is its **exact periodicity** in the analyser bandwidth, resulting in **zero leakage errors** and therefore **requiring no windows** to be applied before its spectral analysis.
+
+One should note that when there is no need to use a window of any form, as is the case for periodic signals, then it is very important not to use one.
+
+
+#### Random Excitation {#random-excitation}
+
+
+##### FRF estimates using random excitation {#frf-estimates-using-random-excitation}
+
+True random excitation is generally applied to the structure using a shaker.
+
+For such a random excitation, a different approach is required in order to determine the FRF.
+ +The principle upon which the FRF is determined using random excitation relies on the following equations + +\begin{subequations} + \begin{align} + S\_{xx}(\omega) &= |H(\omega)|^2 S\_{ff}(\omega)\\\\\\ + S\_{fx}(\omega) &= H(\omega) S\_{ff}(\omega)\\\\\\ + S\_{xx}(\omega) &= H(\omega) S\_{xf}(\omega) + \end{align} +\end{subequations} + +where + +- \\(S\_{xx}(\omega)\\) and \\(S\_{ff}(\omega)\\) are the **autospectra** of the response and excitation signals +- \\(S\_{xf}(\omega)\\) is the **cross spectrum** between these two signals +- \\(H(\omega)\\) is the FRF linking the quantities \\(x\\) and \\(f\\) + +Such parameters can never be measured exactly with only a finite length of data. +However, we have the possibility of providing a **cross check** on the results by using the fact that the **FRF can be estimated using two sets of data**: + +\begin{subequations} + \begin{align} + H\_1(\omega) &= \frac{S\_{fx}(\omega)}{S\_{ff}(\omega)} \\\\\\ + H\_2(\omega) &= \frac{S\_{xx}(\omega)}{S\_{xf}(\omega)} + \end{align} +\end{subequations} + +We now introduce a quantity \\(\gamma^2\\) which is called the **coherence** and which is defined as +\\[ \gamma^2 = \frac{H\_1(\omega)}{H\_2(\omega)}; \quad 0 \le \gamma^2 \le 1 \\] + +Clearly, if all is well with the measurement, the coherence should be unity and we shall be looking for this condition in our test to reassure us that the measurements have been well made. +Small values of the coherence means that the FRF estimate obtained is unreliable and one should determine its cause. + + +##### Noisy Data {#noisy-data} + +There are several situations in which an imperfect measurement might be made, and a low coherence recorded. 
+There may well be noise on one or other of the two signals which could degrade the measured spectra: + +- **Near resonance**: this is likely to influence the **force signal** so that \\(S\_{ff}(\omega)\\) becomes vulnerable and \\(H\_1(\omega)\\) will suffer the most, \\(H\_2(\omega)\\) might be a better indicator in that case +- **Near anti-resonance**: it is the **response signal** which will suffer, making \\(S\_{xx}(\omega)\\) liable to errors and this is the opposite for \\(H\_1(\omega)\\) and \\(H\_2(\omega)\\) + +This is shown by the following equations: + +\begin{subequations} + \begin{align} + H\_1(\omega) &= \frac{S\_{fx}(\omega)}{S\_{ff}(\omega) + S\_{nn}(\omega)} \\\\\\ + H\_2(\omega) &= \frac{S\_{xx}(\omega) + S\_{mm}(\omega)}{S\_{xf}(\omega)} + \end{align} +\end{subequations} + +where \\(S\_{mm}(\omega)\\) and \\(S\_{nn}(\omega)\\) are the autospectra of the noise on the output and input, \\(m(t)\\) and \\(n(t)\\) respectively. + +One suggestion which has been made is to define the FRF as the geometric mean of the two standard estimates: + +\begin{equation} + H\_v(\omega) = \sqrt{H\_1(\omega) H\_2(\omega)} +\end{equation} + +Low coherence can arise when **more than one excitation is applied** to the structure. +Another possibility is that the structure is **not completely linear**. +Here again, the measured response cannot be completely attributed to the measured excitation. + + +##### Noise-free FRF estimates {#noise-free-frf-estimates} + +A third estimator for the FRF can be defined in cases of random excitation, which is called the **instrumental variable estimate**, or \\(H\_3(\omega)\\). + +This formula for the FRF is only possible if more than the usual two channels are being measured simultaneously. +The formula is of interest because it does provide an estimate for the FRF which is unbiased by noise on either the force or the response transducer signals. 
+The formula is: + +\begin{equation} + H\_3(\omega) = \frac{S\_{xv}(\omega)}{S\_{fv}(\omega)} +\end{equation} + +where \\(v(t)\\) is a third signal in the system, such as the voltage supplied to the exciter, and it exploits the fact that noise on either input (force) or output (response) channels does not contaminate cross-spectral density estimates in the way that auto spectra are affected. + + +##### Leakage {#leakage} + +It is known that a low coherence can arise in a measurement where the frequency resolution of the analyzer is not fine enough to describe adequately the very rapidly changing functions such as are encountered near resonance and anti-resonance on lightly-damped structures. + +This is known as a **bias** error and leakage is often the most likely source of low coherence on lightly-damped structures as shown on figure [fig:coherence_resonance](#fig:coherence_resonance). + + + +{{< figure src="/ox-hugo/ewins00_coherence_resonance.png" caption="Figure 18: Coherence \\(\gamma^2\\) and FRF estimate \\(H\_1(\omega)\\) for a lightly damped structure" >}} + +It can be shown that near resonance, \\(H\_2(\omega)\\) is a much more accurate representation of the true FRF than \\(H\_1(\omega)\\). +When this situation is encountered, the best solution is usually to make a zoom measurement as explained previously. + + +##### Postscript {#postscript} + +It is sometimes though that a poor coherence can be eliminated by taking many averages, but this is only possible if the reason for the low coherence is **random noise** which can be averaged out over a period of time. +If the reason if more systematic than that, the averaging will not help. + +Lastly, mention should be made here of a type of excitation referred to as "periodic random" which is, in fact, a combination of pseudo-random and "true" random. 
+In this process, a pseudo-random (or periodic) excitation is generated and after a few cycles, a measurement of the input and the now steady-state response is made.
+Then, a different pseudo-random sequence is generated, the procedure repeated and the result treated as the second sample in what will develop to be an ensemble of random samples.
+The advantage over the simple random excitation is that due to the essential periodic nature of each of the pseudo-random samples, there are no leakage or bias errors in any of the measurements.
+However, the cost is an increase in the measurement time as one has to wait for the steady response condition.
+
+
+#### Transient excitation {#transient-excitation}
+
+There are three types of excitation to be included in this section because they all share the same principle for their signal processing.
+They are:
+
+1. **Burst** excitation: a short section of signal
+2. **Rapid sine sweep** (chirp) excitation
+3. **Impact** excitation from a hammer blow
+
+The first and second of these generally require an attached shaker, but the last one can be implemented with a hammer.
+
+The principle which all these signals share is that the excitation and the consequent response are completely contained within the single sample of measurement which is made.
+In practice, it is common to repeat the transient event more than once and to **average** the results to get the final result.
+How they differ is in the exact form of the transient excitation signal and in the nature of the repeated application.
+
+In the burst type of signal, we have an excitation which is applied and analyzed as if it were a continuous signal, taking the successive samples for averaging one immediately after the other.
+For the chirp and impulse excitations, each individual sample is collected and processed before making the next one, and averaged.
+ + +##### Burst excitation signals {#burst-excitation-signals} + +Burst excitation signals consist of short sections of an underlying continuous signal (which may be a sine wave, a sine sweep or a random signal), followed by a period of zero output, resulting in a response which shows a transient build-up followed by a decay (see figure [fig:burst_excitation](#fig:burst_excitation)). + + + +{{< figure src="/ox-hugo/ewins00_burst_excitation.png" caption="Figure 19: Example of burst excitation and response signals" >}} + +The duration of the burst is under the control of the operator and it is selected so as to provide the ideal signal processing conditions, which are essentially that the **response signal has just died away by the end of the measurement period**. +If this condition has not been attained (burst too long), then leakage error will result. +If it has been reached well before the end of the period (burst too short), then the signal quality will be poor. + +The final measurement will be the result of averaging several samples. +In the case of the burst sine excitation, each sample would be expected to be identical so that the averaging serves only to remove noise on the signals. +In the case of burst random, however, each individual burst will be different to the other and so in this case there is an element of averaging randomly varying behavior, a feature which is believed in some cases to enhance the measurement in the presence of weak non-linearities in the test structure. + + +##### Chirp excitation {#chirp-excitation} + +The chirp consist of a short duration signal which has the form shown in figure [fig:chirp_excitation](#fig:chirp_excitation). + +The frequency content of the chirp can be precisely chosen by the starting and finishing frequencies of the sweep. 
+ + + +{{< figure src="/ox-hugo/ewins00_chirp_excitation.png" caption="Figure 20: Example of chirp excitation and response signals" >}} + + +##### Impulsive excitation {#impulsive-excitation} + +The hammer blow produces an input and response as shown in the figure [fig:impulsive_excitation](#fig:impulsive_excitation). + +This and the chirp excitation are very similar in the analysis point of view, the main difference is that the chirp offers the possibility of greater control of both amplitude and frequency content of the input and also permits the input of a greater amount of vibration energy. + + + +{{< figure src="/ox-hugo/ewins00_impulsive_excitation.png" caption="Figure 21: Example of impulsive excitation and response signals" >}} + +The frequency content of the hammer blow is dictated by the **materials** involved and is rather more difficult to control. +However, it should be recorded that in the region below the first cut-off frequency induced by the elasticity of the hammer tip structure contact, the spectrum of the force signal tends to be **very flat**. + +On some structures, the movement of the structure in response to the hammer blow can be such that it returns and **rebounds** on the hammer tip before the user has had time to move that out of the way. +In such cases, the spectrum of the excitation is seen to have "holes" in it at certain frequencies (figure [fig:double_hits](#fig:double_hits)). + + + +{{< figure src="/ox-hugo/ewins00_double_hits.png" caption="Figure 22: Double hits time domain and frequency content" >}} + +In order to perform the required Fourier analysis of all these cases of transient signals, an assumption is made that the data obtained from a single event can be regarded as representing one period of a **quasi-periodic process**. +This means that if exactly the same input was applied \\(T\\) seconds after the first one, then exactly the same response would be observed. 
+
+This can be difficult to obtain especially for lightly damped structures as the signal will take a long time to die away.
+In that case, one solution is to lengthen the period \\(T\\), but often this is not easily changeable.
+A **window** applied to the raw data provides a more practical solution.
+It is recommended to apply an **exponential window** to both signals.
+By choosing an appropriate exponential decay rate, the modified signal can be made to have effectively died away by the end of the prescribed measurement period, thereby satisfying the signal processing needs.
+
+However, one should be cautious when using such windowing as complex modes can be extracted from data treated this way.
+
+An alternative to this problem is to use the **zoom** facility.
+One of the consequences of using a zoom is that the frequency band is reduced by a proportionate amount.
+However, by making a number (equal to the zoom factor) of separate measurements, each one for a different zoom band, it is possible to construct an FRF over the entire frequency range of interest with both the advantage of it being a window-free measurement and having a much finer frequency resolution.
+
+Once the pseudo-periodicity is established, a discrete Fourier series description can be obtained of both the input and response signals.
+The FRF can be computed from
+\\[ H(\omega\_k) = \frac{X(\omega\_k)}{F(\omega\_k)} \\]
+
+Alternately, the force signals can be treated in the same way as for random excitation, and the formulae for \\(H\_1(\omega)\\) and \\(H\_2(\omega)\\) are used.
+However, care must be exercised when interpreting the results since the **coherence function has different significance here**.
+
+One of the parameters indicated by the coherence is the statistical reliability of an estimate based on a number of averages of a **random** process.
+In the case of an FRF estimate obtained by treating the signals from a succession of nominally identical impacts as a random process, we must note that, strictly, each such sample is a **deterministic**, and not probabilistic, calculation and should contain no statistical uncertainty. + +Thus, the **main source for low coherence** in this instance can only be **leakage errors**, **non-linearity** or **high noise levels**, not the same situation as for random excitation. + +Another feature usually employed in transient testing is that of making a whole series of repeat measurements under nominally identical conditions and then averaging FRF estimates. +The idea behind this is that any one measurement is likely to be contaminated by noise, especially in the frequency regions away from resonance where the response levels are likely to be quite low. +While this averaging technique does indeed enhance the resulting plots, it may well be that several tens of samples need to be acquired before a smooth FRF plot is obtained and this will diminish somewhat any advantage of speed which is a potential attraction to the method. + + +### Calibration {#calibration} + +For all measurement processes, it is necessary to **calibrate** the equipment which is used. +In the case of FRF measurements, there are **two levels of calibration** which should be made: + +- The first of these is a periodic **absolute calibration of individual transducers** (of force and response) to check that their sensitivities are sensibly the same as those specified by the manufacturer. + Any marked deviation could indicate internal damage +- The second type of calibration is one which can and should be carried out during each test, preferably twice, once at the outset and again at the end. + This type of calibration is one which provides the **overall sensitivity of the complete instrumentation system** without examining the performance of the individual elements. 
+
+The first type of calibration is quite difficult to make accurately as it requires independent measurement of the quantity of interest.
+The use of another transducer of the same type is not satisfactory as it is not strictly an independent measure.
+Optical devices can be used for the calibration of displacement sensors.
+
+One of the reasons why the absolute type of calibration has not been further developed for this particular application is the availability of a different type of calibration which is particularly attractive and convenient.
+The parameters measured in a modal analysis are usually ratios between response and force levels, and so what is required is the ability to calibrate the whole measurement system.
+The voltages measured are related to the physical quantities (force and acceleration) by the sensitivities of the respective transducers:
+
+\begin{subequations}
+  \begin{align}
+    v\_f &= E\_f f \\\\\\
+    v\_{\ddot{x}} &= E\_{\ddot{x}} \ddot{x}
+  \end{align}
+\end{subequations}
+
+As mentioned, the difficulty is to determine the individual values for \\(E\_f\\) and \\(E\_{\ddot{x}}\\).
+In practice, we only ever use the measured voltages as a **ratio** to obtain the FRF
+\\[ \frac{\ddot{x}}{f} = \frac{v\_{\ddot{x}}}{v\_f} \frac{E\_f}{E\_{\ddot{x}}} = E \frac{v\_{\ddot{x}}}{v\_f} \\]
+and so **what is required is the ratio of the two sensitivities**:
+
+\begin{equation}
+  E = \frac{E\_f}{E\_{\ddot{x}}}
+\end{equation}
+
+The overall sensitivity can be more readily obtained by a calibration process because we can easily make an independent measurement of the quantity now being measured: the ratio of response to force.
+Suppose the response parameter is acceleration, then the FRF obtained is inertance which has the units of \\(1/\text{mass}\\), a quantity which can readily be independently measured by other means.
+
+Figure [fig:calibration_setup](#fig:calibration_setup) shows a typical calibration setup.
+ + + +{{< figure src="/ox-hugo/ewins00_calibration_setup.png" caption="Figure 23: Mass calibration procedure, measurement setup" >}} + +A calibration procedure of this type has the distinct advantage that it is very easy to perform and can be carried out with all the measurement equipment. +Thus, frequent checks on the overall calibration factors are strongly recommended, ideally as the beginning and end of each test. + + +### Mass Cancellation {#mass-cancellation} + +It is very important the ensure that the force is measured directly at the point at which it is applied to the structure, rather than deducing its magnitude from the current flowing in the shaker coil or other similar **indirect** processes. +This is because near resonance, the actual applied force becomes very small and is thus very prone to inaccuracy. + +This same argument applies on a lesser scale as we examine the detail around the attachment to the structure, as shown in figure [fig:mass_cancellation](#fig:mass_cancellation). + + + +{{< figure src="/ox-hugo/ewins00_mass_cancellation.png" caption="Figure 24: Added mass to be cancelled (crossed area)" >}} + +Here, we see part of the structure, an accelerometer and a force transducer. +The dashed line shows the plane at which the force is actually measured. +Now, assuming that the extra material (shown by the cross hatching) behaves as a rigid mass \\(m^\*\\), we can state that the force actually applied to the structure \\(f\_t\\) is different from that measured by the transducer \\(f\_m\\) by an amount dependent on the acceleration level at the drive point \\(\ddot{m}\\) according to + +\begin{equation} + f\_t = f\_m - m^\* \ddot{x} +\end{equation} + +Physically, what is happening is that some of the measured force is being "used" to move the additional mass so that the force actually applied to the structure is the measured force minus the inertia force of the extra mass. 
+
+Now, the frequency response quantity we actually require is \\(A\_t(\omega)\\) although we have measurements of \\(\ddot{X}\\) and \\(F\_m\\) only, yielding \\(A\_m(\omega)\\).
+If we express it in its real and imaginary parts, we obtain:
+
+\begin{align\*}
+  \text{Re}(F\_t) &= \text{Re}(F\_m) - m^\* \text{Re}(\ddot{X}) \\\\\\
+  \text{Im}(F\_t) &= \text{Im}(F\_m) - m^\* \text{Im}(\ddot{X})
+\end{align\*}
+
+And
+
+\begin{align\*}
+  \text{Re}(1/A\_t) &= \text{Re}(1/A\_m) - m^\* \\\\\\
+  \text{Im}(1/A\_t) &= \text{Im}(1/A\_m)
+\end{align\*}
+
+Mass cancellation is important when the mass to be cancelled (\\(m^\*\\)) is of the same order as the apparent mass of the modes of the structure under test, and this latter is a quantity which varies from point to point on the structure.
+If we are near an anti-node of a particular mode, then the apparent mass (and stiffness) will tend to be relatively small and here mass cancellation may be important.
+
+One important feature of mass cancellation is that it can only be applied to point measurements (where the excitation and response are both considered at the same point).
+This arises because the procedure described above corrects the measured force for the influence of the additional mass at the drive point.
+
+It should be noted that the transducer's inertia is also effective not only in the direction of the excitation but also laterally and in rotation even though they cannot easily be compensated for.
+
+
+### Rotational FRF measurement {#rotational-frf-measurement}
+
+
+#### Significance of rotational FRF data {#significance-of-rotational-frf-data}
+
+\\(\SI{50}{\%}\\) of all DOFs are rotations (as opposed to translations) and \\(\SI{75}{\%}\\) of all frequency response functions involve rotation DOFs.
+However, it is relatively rare to find reference to methods for measurements of rotational DOFs.
+This situation arises from a considerable difficulty which is encountered when trying to measure either rotational responses or excitations and also when trying to apply rotational excitation. + + +#### Measurement of Rotational FRFs using two or more transducers {#measurement-of-rotational-frfs-using-two-or-more-transducers} + +There are two problems to be tackled: + +1. measurement of rotational responses +2. generation of measurement of rotation excitation + +The first of these is less difficult and techniques usually use a pair a matched conventional accelerometers placed at a short distance apart on the structure to be measured as shown on figure [fig:rotational_measurement](#fig:rotational_measurement). + + + +{{< figure src="/ox-hugo/ewins00_rotational_measurement.png" caption="Figure 25: Measurement of rotational response" >}} + +The principle of operation is that by measuring both accelerometer signals, the responses \\(x\_0\\) and \\(\theta\_0\\) can be deduced by taking the mean and difference of \\(x\_A\\) and \\(x\_B\\): + +\begin{subequations} + \begin{align} + x\_0 &= 0.5(x\_A + x\_B) \\\\\\ + \theta\_0 &= (x\_A - x\_B)/l \label{eq:rotational\_diff} + \end{align} +\end{subequations} + +This approach permits us to measure half of the possible FRFs: all those which are of the \\(X/F\\) and \\(\Theta/F\\) type. +The others can only be measured directly by applying a moment excitation. + +Figure [fig:rotational_excitation](#fig:rotational_excitation) shows a device to simulate a moment excitation. +First, a single applied excitation force \\(F\_1\\) corresponds to a simultaneous force \\(F\_0 = F\_1\\) and a moment \\(M\_0 = -F\_1 l\_1\\). +Then, the same excitation force is applied at the second position that gives a force \\(F\_0 = F\_2\\) and moment \\(M\_0 = F\_2 l\_2\\). 
+By adding and subtracting the responses produced by these two separate excitations conditions, we can deduce the translational and rotational responses to the translational force and the rotational moment separately, thus enabling the measurement of all four types of FRF: \\(X/F\\), \\(\Theta/F\\), \\(X/M\\) and \\(\Theta/M\\). + + + +{{< figure src="/ox-hugo/ewins00_rotational_excitation.png" caption="Figure 26: Application of moment excitation" >}} + +Then, the full \\(6 \times 6\\) mobility matrix can be measured, however this procedure is quite demanding. + +Other methods for measuring rotational effects include specially developed rotational accelerometers and shakers. + +However, there is a major problem that is encountered when measuring rotational FRF: the translational components of the structure's movement tends to overshadow those due to the rotational motions. +For example, the magnitude of the difference in equation [eq:rotational_diff](#eq:rotational_diff) is often of the order of \\(\SI{1}{\%}\\) of the two individual values which is similar to the transverse sensitivity of the accelerometers: potential errors in rotations are thus enormous. + + +### Multi-point excitation methods {#multi-point-excitation-methods} + + +#### Multi-point excitation in general {#multi-point-excitation-in-general} + +Multi-excitation methods for modal testing, called **MIMO test methods**, have been developed for FRF data which possesses a **high degree of consistency**. 
+There are other benefits:
+
+- the excitation of large structures with multiple points more closely simulates their vibration environment in service than the single point excitation test
+- the facility of detecting and identifying double or repeated modes
+- the need to complete some tests in a very minimum of on-structure time
+
+Although the majority of modal tests are still performed using a single-point excitation procedure, multi-point excitation is today well developed and is largely used for aerospace structures.
+
+The practical implementation of the different methods currently used is briefly discussed.
+
+
+#### Appropriation or Normal mode testing {#appropriation-or-normal-mode-testing}
+
+We here seek to **establish vibration in a pure mode of vibration by careful selection of the locations and magnitudes of a set of sinusoidal excitation forces**.
+
+This works at the undamped system's natural frequencies, and in that case the force and response vectors are exactly in quadrature:
+\\[ i\\{X\\} = [H\_\text{Re}(\omega) + i H\_\text{Im}(\omega)] \\{F\\} \\]
+
+It follows that this equation is valid only if \\(\det |H\_{\text{Re}}(\omega)| = 0\\) and this condition provides the basis of a **method to locate the undamped system natural frequencies** from measured FRF data.
+
+
+#### Multi-phase stepped-sine (MPSS) testing {#multi-phase-stepped-sine--mpss--testing}
+
+We here excite a MDOF system at a single sinusoidal frequency \\(\omega\\) by a set of \\(p\\) excitation forces \\(\\{F\\}e^{i\omega t}\\) such that there is a set of steady-state responses \\(\\{X\\}e^{i\omega t}\\).
+The two vectors are related by the system's FRF properties as:
+
+\begin{equation}
+  \\{X\\}\_{n\times 1} = [H(\omega)]\_{n\times p} \\{F\\}\_{p\times 1}
+\end{equation}
+
+However, it is not possible to derive the FRF matrix from the single equation [eq:mpss_equation](#eq:mpss_equation), because there will be insufficient data in the two vectors (one of length \\(p\\), the other of length \\(n\\)) to define completely the \\(n\times p\\) FRF matrix.
+
+What is required is to make a series of \\(p^\prime\\) measurements of the same basic type using different excitation vectors \\(\\{F\\}\_i\\) that should be chosen such that the forcing matrix \\([F]\_{p\times p^\prime} = [\\{F\\}\_1, \dots, \\{F\\}\_{p^\prime}]\\) is non-singular.
+This can be assured if:
+
+- there are at least as many vectors as there are forces: \\(p^\prime \geq p\\)
+- the individual force vectors are linearly independent of each other
+
+A second matrix is also constructed containing the response vectors \\([X]\_{n\times p^\prime} = [\\{X\\}\_1, \dots, \\{X\\}\_{p^\prime}]\\).
+Now, these two collections of measured data can be used to determine the required FRF matrix:
+
+\begin{equation}
+  [H(\omega)]\_{n\times p} = [X]\_{n\times p^\prime} [F]\_{p^\prime \times p}^+
+\end{equation}
+
+where \\(+\\) denotes the generalized inverse of the forcing matrix.
+
+
+#### Multi-point random (MPR) testing {#multi-point-random--mpr--testing}
+
+
+##### Concept {#concept}
+
+In this method, advantage is taken of the incoherence of several uncorrelated random excitations which are applied simultaneously at several points.
+Then, the need to repeat the test several times, as was necessary for the MPSS method, is avoided.
+
+The purpose of this methods is to obtain the FRF data in an optimal way and to reduce the probability of introducing systematic errors to the FRF measurements.
+
+Let's consider the simplest form of a multi excitation as that of a system excited by two simultaneous forces \\(f\_1(t)\\) and \\(f\_2(t)\\) where the response \\(x\_i(t)\\) is of particular interest.
+We can derive expressions for the required FRF parameters as functions of the auto and cross spectral densities of the three parameters of interest:
+
+\begin{subequations}
+  \begin{align}
+    H\_{i1}(\omega) &= \frac{S\_{1i}S\_{22} - S\_{2i}S\_{12}}{S\_{11}S\_{22} - S\_{12}S\_{21}}\\\\\\
+    H\_{i2}(\omega) &= \frac{S\_{2i}S\_{11} - S\_{1i}S\_{21}}{S\_{11}S\_{22} - S\_{12}S\_{21}}
+  \end{align}
+\end{subequations}
+
+These expressions can be used provided that \\(S\_{11}S\_{22}\neq |S\_{12}|^2\\) which is equivalent to saying that the two excitation forces must not be fully correlated.
+
+
+##### General formulation {#general-formulation}
+
+In practice, the method is applied using different numbers of exciters, and several response points simultaneously.
+We have that
+
+\begin{equation}
+  [H\_{xf}(\omega)]\_{n\times p} = [S\_{xf}(\omega)]\_{n\times p} [S\_{ff}(\omega)]\_{p\times p}^{-1}
+\end{equation}
+
+where it can be seen that the matrix of spectral densities for the forces \\([S\_{ff}(\omega)]\_{p\times p}\\) must be non singular.
+Thus, care must be taken in practice to ensure this condition, noting that it is the applied forces and not the signal sources which must meet the requirement.
+
+In practice, this is difficult to obtain as even if the input signals to the exciters' amplifiers are uncorrelated, the forces applied to the structure will certainly not be. This is particularly true near the resonances as the dynamic response is dominated by the one mode which is independent of the actual force pattern.
+
+
+##### Coherence in MPR measurements {#coherence-in-mpr-measurements}
+
+In a similar way in which we defined coherence for the SISO system, we can make use of the same concepts for a MIMO system.
+During a MIMO test, we basically measure three matrices: +\\[ [S\_{ff}(\omega)]; \ [S\_{xx}(\omega)]; \ [S\_{fx}(\omega)] \\] +Then, we can derive an estimate for the FRF matrix: +\\[ H\_1(\omega)^T = [S\_{ff}(\omega)]^{-1} [S\_{fx}(\omega)] \\] +and then compute an estimate for the autospectrum of the response from: + +\begin{align\*} + [\tilde{S}\_{xx}(\omega)] &= [H\_1^\*(\omega)] [S\_{fx}(\omega)] \\\\\\ + &= [S\_{xf}(\omega)] [S\_{ff}(\omega)]^{-1} [S\_{fx}(\omega)] +\end{align\*} + +Now, by comparing the estimated response spectrum \\([\tilde{S}\_{xx}(\omega)]\\) with the actual measurement \\([S\_{xx}(\omega)]\\), we obtain a formula for the multiple coherence between the two parameters \\(\\{f(t)\\}\\) and \\(\\{x(t)\\}\\): + +\begin{equation\*} + \tcmbox{[\gamma^2(\omega)] = [S\_{xx}(\omega)]^{-1} [S\_{xf}(\omega)] [S\_{ff}(\omega)]^{-1} [S\_{fx}(\omega)]} +\end{equation\*} + + +#### Multiple-reference impact tests {#multiple-reference-impact-tests} + +This class of hammer excitation is referred to as **Multi-reference Impact Tests** (**MRIT**). +Typically, three response references are measured (often, the \\(x\\), \\(y\\) and \\(z\\) components at the response measurement location) every time a hammer blow is applied to the structure. + +FRF data collected by performing a test in this way will be the equivalent of exciting the structure at three points simultaneously while measuring the response at each of the \\(n\\) points of interest. +Thus, in the same sense that a multiple-input test is a multi-reference measurement (measuring several columns of the FRF matrix), so too is the MRIT since it provides a multi-reference measurement including several rows of the same FRF matrix. 
+
+
+## Modal Parameter Extraction Methods {#modal-parameter-extraction-methods}
+
+
+### Introduction {#introduction}
+
+
+#### Introduction to the concept of modal analysis {#introduction-to-the-concept-of-modal-analysis}
+
+This section describes some of the many procedures that are used for **Modal Analysis** and attempts to explain their various advantages and limitations.
+These methods generally consist of **curve-fitting a theoretical expression for an individual FRF to the actual measured data**.
+
+
+
+ +1. **part of single FRF curve** +2. **complete curve** encompassing several resonances +3. **a set of many FRF plots** all on the same structure + +
+ +In every case, the task is basically to **find the coefficients in a theoretical expression** for the FRF which then most closely **matches the measured data**. + +This phase of the modal test procedure is often referred to as **modal parameter extraction** or **modal analysis**. + + +#### Types of modal analysis {#types-of-modal-analysis} + +A majority of current curve-fitting methods operate on the response characteristics in the frequency domain, but there are other procedures which perform a curve-fit in the time domain. These latter methods are based on the fact that the Impulse Response Function is another characteristic function of the system. + +Modal analysis methods can be classified into a series of different groups. + +
+
+ +It depends on the **domain in which the analysis is performed**: + +- frequency domain of FRFs +- Time domain of IRFs + +
+ +
+
+ +Next, it is appropriate to consider the **frequency range** over which each individual analysis will be performed. +Either a single mode is to be extracted at a time, or several: + +- **SDOF methods** +- **MDOF methods** + +
+ +
+
+ +A further classification relates to the **number of FRFs** which are to be included in a single analysis: + +- **SISO**: the FRF are measured individually +- **SIMO**: a set of FRF are measured simultaneously at several response points but under the same single-point excitation. + This describes the FRFs in a column or row of the FRF matrix +- **MIMO**: the responses at several points are measured simultaneously while the structure is excited at several points, also simultaneously + +
+
+
+#### Difficulties due to damping {#difficulties-due-to-damping}
+
+Many of the problems encountered in practice are related to the difficulties associated with the **reliable modeling of damping effects**.
+In practice, we are obliged to make certain assumptions about what model is to be used for the damping effects.
+Sometimes, significant errors can be obtained in the modal parameter estimates (and not only in the damping parameters), as a result of a conflict between the assumed damping behavior and that which actually occurs in reality.
+
+Another difficulty is that of **real modes and complex modes**.
+In practice, all modes of practical structures are expected to be complex, although in the majority of cases, such complexity will be very small, and often quite negligible.
+
+
+#### Difficulties of model order {#difficulties-of-model-order}
+
+One problem is determining **how many modes there are in the measured FRF**.
+
+This question is one of the most difficult to resolve in many practical situations where a combination of finite resolution and noise in the measured data combine to make the issue very unclear.
+
+Many modern modal analysis curve-fitters are capable of fitting any FRF of almost any order, however, it might fit **fictitious modes** introduced in the analysis process.
+Correct **differentiation between genuine and fictitious modes** remains a critical task in many modal tests.
+
+
+### Preliminary checks of FRF data {#preliminary-checks-of-frf-data}
+
+
+#### Visual Checks {#visual-checks}
+
+Before starting the modal analysis of any measured FRF data, it is always important to do a few **simple checks** in order to ensure that no obvious error is present in the data.
+Most of the checks are made using a **log log plot of the modulus of the measured FRF**.
+
+
+##### Low-frequency asymptotes {#low-frequency-asymptotes}
+
+If the structure is **grounded**, then we should clearly see a **stiffness-like** characteristic, appearing as asymptotic to a stiffness line at the lowest frequencies (below the first resonance) and the magnitude of this should correspond to that of the static stiffness of the structure at the point in question.
+
+If the structure has been tested in a free condition, then we should expect to see a **mass-line** asymptote where its magnitude may be deduced from purely rigid-body considerations.
+
+Deviations from this expected behavior may be caused by the frequency range of measurement not extending low enough to see the asymptotic trend, or they may indicate that the required support conditions have not in fact been achieved.
+
+
+##### High-frequency asymptotes {#high-frequency-asymptotes}
+
+In the upper end of the frequency range, it is sometimes found (especially on point mobility measurements), that the curve becomes asymptotic to a **mass line** or, more usually to a **stiffness line**.
+Such situation can result in considerable difficulties for the modal analysis process and reflects a situation where the excitation is being applied at a point of very high mass or flexibility.
+Then, modal parameters are difficult to extract as they are overwhelmed by the dominant local effects.
+
+
+##### Incidence of anti-resonances {#incidence-of-anti-resonances}
+
+For a **point FRF**, there must be an **antiresonance after each resonance**, while for transfer FRFs between two points well-separated on the structure, we should expect **more minima than antiresonances**.
+ +A second check to be made is that the resonance peaks and the antiresonances exhibit the **same sharpness on a log-log plot**: + +- Frequency resolution limitation will cause blunt resonances +- Inadequate vibration levels results in poor definition of the antiresonance regions + + +##### Overall shape of FRF skeleton {#overall-shape-of-frf-skeleton} + +The relative position of the resonance, antiresonances and ambient levels of the FRF curve can give information on the validity of the data. +This will be further explained. + + +##### Nyquist plot inspection {#nyquist-plot-inspection} + +When plotting the FRF data in a Nyquist format, we expect that each resonance traces out at least **part of a circular arc**, the extent of which depends largely on the interaction between adjacent modes. +For a system with well-separated modes, it is to be expected that each resonance will generate the major part of a circle, but when modal interference increases, only small segments will be identifiable. +However, within these bounds, the Nyquist plot should ideally exhibit a smooth curve, and failure to do so may be an indication of a poor measurement technique. + + +#### Assessment of multiple-FRF data set using SVD {#assessment-of-multiple-frf-data-set-using-svd} + +When several FRFs are acquired (either from SIMO or MIMO data), the **Singular Value Decomposition** has proved to be a very useful tool to check the **quality**, **reliability** and **order** of the data. + +The set of FRF which are to be assessed is stored in a series of vectors \\(\\{H\_{jk}(\omega)\\}\\) each of which contains the values for one FRF at all measured frequencies \\(\omega = \omega\_1, \dots, \omega\_L\\). +These vectors are assembled into a matrix +\\[ [A]\_{L\times np} = [\\{H\_{11}(\omega)\\}\_{L\times 1} \\{H\_{21}(\omega)\\}\_{L\times 1} \dots \\{H\_{np}(\omega)\\}\_{L\times 1} ] \\] +where \\(n\\) and \\(p\\) represent the number of measured DOFs and the number of excitation points. 
+\\(L\\) represents the number of frequencies at which the FRF data are defined. + +
+
+ +\begin{equation} + [A]\_{L\times np} = [U]\_{L\times L} [\Sigma]\_{L\times np} [V]\_{np\times np}^T +\end{equation} + +
+ +
+
+ +- The **singular values** \\(\sigma\_1, \dots, \sigma\_w\\) describes the **amplitude** information +- Number of **non-zero singular values** represents the **order of the system** (i.e. the number of independent modes of vibration which effectively contribute to the measured FRFs) +- The columns of \\([U]\\) represent the **frequency distribution** of these amplitudes +- The columns of \\([V]\\) represent their **spatial distribution** + +
+ +
+
+ +From the SVD, we can compute a new matrix \\([P]\_{L\times np}\\) which is referred to as the **Principal Response Function** (**PRF**) matrix. +Each column of the PRF contains a response function corresponding to one of the original FRFs: + +\begin{equation} + [U]\_{L\times L} [\Sigma]\_{L\times np} = [P]\_{L\times np} +\end{equation} + +Then, each PRF is, simply, a particular combination of the original FRFs, and thus each FRF contains all the essential information included in those FRFs (eigenvalues for instance). + +
+
+One example of this form of pre-processing is shown on figure [fig:PRF_numerical](#fig:PRF_numerical) for numerically-simulated test data, and another in figure [fig:PRF_measured](#fig:PRF_measured) for the case of real measured test data.
+
+The second plot [fig:PRF_numerical_svd](#fig:PRF_numerical_svd) helps to determine the true order of the system because the number of non-zero singular values is equal to this parameter.
+The third plot [fig:PRF_numerical_PRF](#fig:PRF_numerical_PRF) shows the genuine modes distinct from the computational modes.
+
+
+
+
+It can be seen that the PRFs tend to fall into **two groups**:
+
+- The most prominent are a set of response function, each of which has a small number of dominant peaks.
+  It represents the **physical modes** of the system.
+- The lower group shows less distinct and clear-cut behavior.
+  It represents the **noise or computational modes** present in the data.
+
+The two groups are usually separated by a clear gap (depending on the noise present in the data):
+
+- If such gap is present, then it will be possible to extract the properties of the \\(m\\) modes which are active in the measured responses over the frequency range covered.
+- If not, then it may be impossible to perform a successful modal parameter extraction.
+
+
+ + +
+ Table 19: + FRF and PRF characteristics for numerical model +
+ +| ![](/ox-hugo/ewins00_PRF_numerical_FRF.png) | ![](/ox-hugo/ewins00_PRF_numerical_svd.png) | ![](/ox-hugo/ewins00_PRF_numerical_PRF.png) | +|---------------------------------------------|---------------------------------------------|---------------------------------------------| +| FRF | Singular Values | PRF | +| width=\linewidth | width=\linewidth | width=\linewidth | + + +
+ Table 20: + FRF and PRF characteristics for measured model +
+ +| ![](/ox-hugo/ewins00_PRF_measured_FRF.png) | ![](/ox-hugo/ewins00_PRF_measured_svd.png) | ![](/ox-hugo/ewins00_PRF_measured_PRF.png) | +|--------------------------------------------|--------------------------------------------|--------------------------------------------| +| FRF | Singular Values | PRF | +| width=\linewidth | width=\linewidth | width=\linewidth | + + +#### Mode Indicator Functions (MIFs) {#mode-indicator-functions--mifs} + + +##### General {#general} + +The Mode Indicator Functions are usually used on \\(n\times p\\) FRF matrix where \\(n\\) is a relatively large number of measurement DOFs and \\(p\\) is the number of excitation DOFs, typically 3 or 4. + +In these methods, the frequency dependent FRF matrix is subjected to an eigenvalue or singular value decomposition analysis which thus yields a small number (3 or 4) of eigen or singular values, these also being frequency dependent. + +These methods are used to **determine the number of modes** present in a given frequency range, to **identify repeated natural frequencies** and to pre process the FRF data prior to modal analysis. + + +##### Complex mode indicator function (CMIF) {#complex-mode-indicator-function--cmif} + +The Complex Mode Indicator Function is defined simply by the SVD of the FRF (sub) matrix. + +
+
+ +This decomposition, is defined as + +\begin{align\*} + [H(\omega)]\_{n\times p} &= [U(\omega)]\_{n\times n} [\Sigma(\omega)]\_{n\times p} [V(\omega)]\_{p\times p}^H\\\\\\ + [CMIF(\omega)]\_{p\times p} &= [\Sigma(\omega)]\_{p\times n}^T [\Sigma(\omega)]\_{n\times p} +\end{align\*} + +
+ +The actual mode indicator values are provided by the squares of the singular values and are usually plotted as a function of frequency in logarithmic form as shown in figure [fig:mifs](#fig:mifs): + +- **Natural frequencies are indicated by large values of the first CMIF** (the highest of the singular values) +- **double or multiple modes by simultaneously large values of two or more CMIF**. + +Associated with the CMIF values at each natural frequency \\(\omega\_r\\) are two vectors: + +- the left singular vector \\(\\{U(\omega\_r)\\}\_1\\) which approximates the **mode shape** of that mode +- the right singular vector \\(\\{V(\omega\_r)\\}\_1\\) which represents the approximate **force pattern necessary to generate a response on that mode only** + + + +{{< figure src="/ox-hugo/ewins00_mifs.png" caption="Figure 27: Complex Mode Indicator Function (CMIF)" >}} + +
+
+ +In addition to identifying all the significant natural frequencies, the CMIF can also be used to **generate a set of enhanced FRFs** from the formula: + +\begin{equation} + [EFRF(\omega)]\_{n\times p} = [H(\omega)]\_{n\times p} [V(\omega)]\_{p\times p} +\end{equation} + +There is one non-trivial EFRF for each mode, the result of which is an almost **SDOF characteristic** response function which is then readily amenable to modal analysis by the simplest of methods. + +As in the previous case, these modified FRFs are simply linear combinations of the original measured data and, as such, contain no more and no less information than in their original form. + +However, such an approach lends itself to a very reliable extraction of the global properties (eigenvalues) for the measured FRF data set which can then be re-visited in a second stage to determine the local properties (mode shapes) for all the measured DOFs. + +
+
+
+##### Other MIFs {#other-mifs}
+
+There are multiple variants on the mode indicator function concepts. Some use the eigenvalue decomposition instead of the singular value decomposition.
+Two are worth mentioning: the Multivariable Mode Indicator Function (MMIF) and the Real Mode Indicator Function (RMIF).
+
+
+### SDOF Modal Analysis Methods {#sdof-modal-analysis-methods}
+
+
+#### Review of SDOF modal analysis methods {#review-of-sdof-modal-analysis-methods}
+
+The "SDOF" approach does not imply that the system being modeled is reduced to a single degree of freedom, but that just **one resonance is considered at a time**.
+
+There are limitations to such simple approach, the principal one being that **very close modes cannot easily be separated**.
+
+There are several implementations of the basic concept of SDOF analysis, ranging from the simple **peak-picking method**, through the classic **circle-fit approach** to more automatic algorithms such as the **inverse FRF "line-fit" method** and the general **least-squares methods**.
+
+As the name implies, the method exploits the fact that in the vicinity of a resonance, the behavior of the system is dominated by a single mode (the magnitude is dominated by one of the terms in the series).
+
+The general expression of the receptance FRF
+
+\begin{equation}
+  \alpha\_{jk}(\omega) = \sum\_{s=1}^N \frac{{}\_sA\_{jk}}{\omega\_s^2 - \omega^2 + i \eta\_s \omega\_s^2}
+\end{equation}
+
+can be rewritten as:
+
+\begin{equation}
+  \alpha\_{jk}(\omega) = \frac{{}\_rA\_{jk}}{\omega\_r^2 - \omega^2 + i \eta\_r \omega\_r^2} + \sum\_{\substack{s=1\\s \neq r}}^N \frac{{}\_sA\_{jk}}{\omega\_s^2 - \omega^2 + i \eta\_s \omega\_s^2}
+\end{equation}
+
+
+
+ +Now, the SDOF assumption is that for a small range of frequency in the vicinity of the natural frequency of mode \\(r\\), \\(\alpha\_{jk}(\omega)\\) can be approximated as + +\begin{equation} + \alpha\_{jk}(\omega)\_{\omega\approx\omega\_r} = \frac{{}\_rA\_{jk}}{\omega\_r^2 - \omega^2 + i \eta\_r \omega\_r^2} + {}\_rB\_{jk} +\end{equation} + +
+
+This does not mean that the other modes are unimportant or negligible (their influence can be considerable), but rather that their combined effect can be represented as a **constant term** around this resonance.
+
+
+#### SDOF Modal Analysis I - Peak-Amplitude method {#sdof-modal-analysis-i-peak-amplitude-method}
+
+In this method, it is assumed that close to one local mode, any effects due to the other modes can be ignored.
+This is a method which works adequately for structures whose FRF exhibit **well separated modes**.
+This method is useful in obtaining initial estimates to the parameters.
+
+The peak-picking method is applied as follows (illustrated on figure [fig:peak_amplitude](#fig:peak_amplitude)):
+
+1. First, **individual resonance peaks** are detected on the FRF plot and the maximum responses frequency \\(\omega\_r\\) is taken as the **natural frequency** of that mode
+2. Second, the **local maximum value of the FRF** \\(|\hat{H}|\\) is noted and the **frequency bandwidth** of the function for a response level of \\(|\hat{H}|/\sqrt{2}\\) is determined.
+   The two points thus identified as \\(\omega\_b\\) and \\(\omega\_a\\) are the "half power points"
+3. The **damping** of the mode in question can now be estimated from one of the following formulae:
+
+   \begin{subequations}
+     \begin{align}
+       \eta\_r &= \frac{\omega\_a^2 - \omega\_b^2}{2 \omega\_r^2} \approx \frac{\Delta\omega}{\omega\_r} \\\\\\
+       2\xi\_r &= \eta\_r
+     \end{align}
+   \end{subequations}
+4. 
We now obtain an estimate for the **modal constant** of the mode being analyzed by assuming that the total response in this resonant region is attributed to a single term in the general FRF series:
+
+   \begin{equation}
+     |\hat{H}| = \frac{A\_r}{\omega\_r^2 \eta\_r} \Leftrightarrow A\_r = |\hat{H}| \omega\_r^2 \eta\_r
+   \end{equation}
+
+It must be noted that the estimates of both damping and modal constant depend heavily on the accuracy of the maximum FRF level \\(|\hat{H}|\\) which is difficult to measure with great accuracy, especially for lightly damped systems.
+Only real modal constants and thus real modes can be deduced by this method.
+
+
+
+{{< figure src="/ox-hugo/ewins00_peak_amplitude.png" caption="Figure 28: Peak Amplitude method of modal analysis" >}}
+
+Alternatives of this method can be applied using the real part of the receptance FRF instead of the modulus plot.
+
+
+#### SDOF Modal Analysis II - Circle Fit Method {#sdof-modal-analysis-ii-circle-fit-method}
+
+
+##### Properties of the modal circle {#properties-of-the-modal-circle}
+
+MDOF systems produce Nyquist plots of FRF data which include **sections of near circular arcs** corresponding to the regions near the natural frequencies.
+This characteristic provides the basis of the "**SDOF circle-fit method**".
+
+We here use **structural damping** and we use the **receptance** form of FRF data as this will produce an exact circle in a Nyquist plot.
+However, if it is required to use a model incorporating viscous damping, then the mobility version of the FRF data should be used.
+ +In the case of a system assumed to have structural damping, the basic function with which we are dealing is + +\begin{equation} + \alpha(\omega) = \frac{1}{\omega\_r^2\left( 1 - \left(\omega/\omega\_r\right)^2 + i\eta\_r \right)} +\end{equation} + +since the only effect of including the modal constant \\({}\_rA\_{jk}\\) is to scale the size of the circle by \\(|{}\_rA\_{jk}|\\) and to rotate it by \\(\angle {}\_rA\_{jk}\\). +A plot of the quantity \\(\alpha(\omega)\\) is given in figure [fig:modal_circle](#fig:modal_circle). + + +
+ Table 21: + Modal Circle +
+
+| ![](/ox-hugo/ewins00_modal_circle.png) | ![](/ox-hugo/ewins00_modal_circle_bis.png) |
+|----------------------------------------|--------------------------------------------------------------------|
+| Properties | \\(\omega\_b\\) and \\(\omega\_a\\) points |
+| width=\linewidth | width=\linewidth |
+
+For any frequency \\(\omega\\), we have the following relationship:
+
+\begin{subequations}
+  \begin{align}
+    \tan \gamma &= \frac{\eta\_r}{1 - (\omega/\omega\_r)^2}\\\\\\
+    \tan(\SI{90}{\degree}-\gamma) &= \tan\left(\frac{\theta}{2}\right) = \frac{1 - (\omega/\omega\_r)^2}{\eta\_r} \label{eq:modal\_circle\_tan}
+  \end{align}
+\end{subequations}
+
+From [eq:modal_circle_tan](#eq:modal_circle_tan), we obtain:
+
+\begin{equation}
+  \omega^2 = \omega\_r^2 \left(1 - \eta\_r \tan\left(\frac{\theta}{2}\right) \right)
+\end{equation}
+
+If we differentiate [eq:modal_circle_omega](#eq:modal_circle_omega) with respect to \\(\theta\\), we obtain:
+
+\begin{equation}
+  \frac{d\omega^2}{d\theta} = \frac{-\omega\_r^2 \eta\_r}{2} \left( 1 + \frac{\left(1 - (\omega/\omega\_r)^2\right)^2}{\eta\_r^2} \right)
+\end{equation}
+
+The reciprocal of this quantity is a **measure of the rate at which the locus sweeps around the circular arc**.
+It may be seen to reach a maximum value when \\(\omega=\omega\_r\\):
+
+\begin{equation}
+  \tcmbox{\frac{d}{d\omega} \left(\frac{d\omega^2}{d\theta}\right) = 0 \text{ when } \omega\_r^2 - \omega^2 = 0}
+\end{equation}
+
+It may also be seen that an **estimate of the damping** is provided by the sweep rate:
+
+\begin{equation}
+  \tcmbox{\left(\frac{d\theta}{d\omega^2}\right)\_{\omega=\omega\_r} = -\frac{2}{\omega\_r^2 \eta\_r}}
+\end{equation}
+
+Suppose now we have two specific points on the circle, one corresponding to a frequency \\(\omega\_b\\) below the natural frequency and the other one \\(\omega\_a\\) above the natural frequency.
+Referring to figure [fig:modal_circle_bis](#fig:modal_circle_bis), we can write: + +\begin{subequations} + \begin{align} + \tan\left(\frac{\theta\_b}{2}\right) &= \frac{1 - (\omega\_b/\omega\_r)^2}{\eta\_r}\\\\\\ + \tan\left(\frac{\theta\_a}{2}\right) &= \frac{(\omega\_a/\omega\_r)^2 - 1}{\eta\_r} + \end{align} +\end{subequations} + +From these two equations, we can obtain an expression for the **damping of the mode**: + +\begin{equation} + \tcmbox{\eta\_r = \frac{\omega\_a^2 - \omega\_b^2}{\omega\_r^2 \left(\tan(\theta\_a/2) + \tan(\theta\_b/2)\right)}} +\end{equation} + +which is an exact expression and applies for all levels of damping. + +If we take two points for which \\(\theta\_a = \theta\_b = \SI{90}{\degree}\\), we obtain: + +\begin{subequations} + \begin{align} + \eta\_r &= \frac{\omega\_2^2 - \omega\_1^2}{2 \omega\_r^2}\\\\\\ + \eta\_r &= \frac{\omega\_2 - \omega\_1}{\omega\_r} \text{ for light damping} + \end{align} +\end{subequations} + +When scaled by a modal constant \\({}\_rA\_{jk}\\) added in the numerator, the diameter of the circle will be +\\[ {}\_rD\_{jk} = \frac{\left|{}\_rA\_{jk}\right|}{\omega\_r^2 \eta\_r} \\] +and the whole circle will be rotated so that the principal diameter (the one passing through the natural frequency point) is oriented at an angle \\(\arg({}\_rA\_{jk})\\) to the negative Imaginary axis. 
+ +For SDOF system with **viscous** damping, rather than structural damping, the **mobility** is +\\[ Y(\omega) = \frac{i\omega}{(k - \omega^2 m) + i \omega c} \\] + +And we have + +\begin{equation} + \tan\left(\frac{\theta}{2}\right) = \frac{1 - (\omega/\omega\_r)^2}{2 \xi \omega/\omega\_r} +\end{equation} + +From points at \\(\omega\_a\\) and \\(\omega\_b\\), we obtain + +\begin{subequations} + \begin{align} + \xi &= \frac{\omega\_a^2 - \omega\_b^2}{2 \omega\_r \left( \omega\_a \tan(\theta\_a/2) + \omega\_b \tan(\theta\_b/2) \right)}\\\\\\ + &= \frac{\omega\_a - \omega\_b}{\omega\_r \left( \tan(\theta\_a/2) + \tan(\theta\_b/2) \right)} \text{ for light damping} + \end{align} +\end{subequations} + +Finally, selecting two points for which \\(\theta\_a = \theta\_b = \SI{90}{\degree}\\): + +\begin{equation} + \xi = \frac{\omega\_2 - \omega\_1}{2 \omega\_r} +\end{equation} + + +##### Circle-fit analysis procedure {#circle-fit-analysis-procedure} + +The sequence is: + +1. **Select points to be used**. +2. **Fit circle, calculate quality of fit**. + It is generally done by a least-square algorithm. + Then we obtain the **center** and **radius** of the circle and the **quality factor** is the mean square deviation of the chosen points from the circle. +3. **Locate natural frequency, obtain damping estimate**. + The rate of sweep through the region is estimated numerically and the frequency at which it reaches the maximum is deduced. + At the same time, an estimate of the damping is derived using [eq:estimate_damping_sweep_rate](#eq:estimate_damping_sweep_rate). + A typical example is shown on figure [fig:circle_fit_natural_frequency](#fig:circle_fit_natural_frequency). +4. **Calculate multiple damping estimates, and scatter**. + A set of damping estimates using all possible combination of the selected data points are computed using [eq:estimate_damping](#eq:estimate_damping). + Then, we can choose the damping estimate to be the mean value. 
+   We also look at the distribution of the obtained damping estimates as it permits a useful diagnostic of the quality of the entire analysis:
+   - Good measured data should lead to a smooth plot of these damping estimates, any roughness of the surface can be explained in terms of noise in the original data.
+   - However, any systematic distortion of the plot is almost certainly caused by some form of error in the data, in the analysis or in the assumed behavior of the system.
+5. **Determine modal constant modulus and argument**.
+   The magnitude and argument of the modal constant is determined from the diameter of the circle and from its orientation relative to the Real and Imaginary axis.
+
+
+
+{{< figure src="/ox-hugo/ewins00_circle_fit_natural_frequency.png" caption="Figure 29: Location of natural frequency for a Circle-fit modal analysis" >}}
+
+Then, the theoretically regenerated FRF can be plotted against the original measured data for comparison.
+In order to determine the contribution of other modes on the resonance of mode \\(r\\), the distance from the top of the principal diameter to the origin has to be measured and is equal to \\({}\_rB\_{jk}\\).
+
+
+#### SDOF Modal Analysis III - Inverse or Line-fit method {#sdof-modal-analysis-iii-inverse-or-line-fit-method}
+
+
+##### Properties of inverse FRF plots {#properties-of-inverse-frf-plots}
+
+The original version of this method uses the fact that a function which generates a circle when plotted in the complex plane will, when plotted as a reciprocal, trace out a **straight line**.
+Thus, if we were to plot the reciprocal of receptance of a SDOF system with structural damping, we would find that in the Argand diagram it produces a straight line: + +\begin{subequations} + \begin{align} + \alpha(\omega) &= \frac{(k - \omega^2 m) - i d}{(k - \omega^2 m)^2 + d^2}\\\\\\ + \frac{1}{\alpha(\omega)} &= (k - \omega^2 m) + i d + \end{align} +\end{subequations} + +First, a least squares best-fit straight line is constructed through the data points and an **estimate for the damping parameters** is immediately available from the **intercept of the line with the Imaginary axis**. +Furthermore, an indication of the reliability of that estimate may be gained from the nature of the deviations of the data points from the line itself. +We can here determine whether the damping is structural (imaginary part constant with frequency) or viscous (imaginary part linear with frequency). + +Then, a second least squares operation is performed, this time on the deviation between the real part of the measured data points and that of the theoretical model. +Resulting from this, we obtain **estimates for the mass and stiffness parameters**. + +It should be noted that this approach is best suited to systems with real modes and to relatively well-separated modes. + + +##### General inverse analysis method {#general-inverse-analysis-method} + +It has been shown that if a purely SDOF system FRF is plotted in this way, then both plots demonstrate straight lines, and separately reveal useful information about the mass, stiffness and damping properties of the measured system. + +The inverse FRF of a MDOF system is not as convenient as SDOF system as: + +\begin{align\*} + H\_{jk}^{-1} (\omega) &= \frac{1}{\sum (k - \omega^2 m) + i \omega c}\\\\\\ + &\neq \sum \frac{1}{(k - \omega^2 m) + i \omega c} +\end{align\*} + +Thus, in order to determine the modal parameters of a MDOF system using inverse method, some modifications to the basic formulation must be found. 
+ +We start with the basic formula for SDOF analysis: +\\[ \alpha\_{jk}(\omega)\_{\omega\simeq\omega\_r} \simeq \frac{{}\_rA\_{jk}}{\omega\_r^2 - \omega^2 + i \eta\_r \omega\_r^2} + {}\_rB\_{jk} \\] +We can note that the presence of the \\({}\_rB\_{jk}\\) term is problematic for the inverse plot. + +The trick is to define a **new FRF term** \\(\alpha^\prime\_{ik}(\omega)\\) which is the difference between the actual FRF and the value of the FRF at one fixed frequency \\(\Omega\\) in the range of interest called "**fixing frequency**": +\\[ \alpha^\prime\_{jk}(\omega) = \alpha\_{jk}(\omega) - \alpha\_{jk}(\Omega) \\] +from which the inverse FRF parameter that we shall use for the modal analysis \\(\Delta(\omega)\\), can be defined as: + +\begin{align\*} + \Delta(\omega) &= (\omega^2 - \Omega^2)/\alpha^\prime\_{jk}(\omega)\\\\\\ + &= \text{Re}(\Delta) + i \text{Im}(\Delta) +\end{align\*} + +It can be seen that +\\[ \text{Re}(\Delta) = m\_R \omega^2 + c\_R; \quad \text{Im}(\Delta) = m\_I \omega^2 + c\_I \\] +and that + +\begin{align\*} + m\_R &= a\_R(\Omega^2 - \omega\_r^2) - b\_r (\omega\_r^2 \eta\_r) \\\\\\ + m\_I &= -b\_R(\Omega^2 - \omega\_r^2) - a\_r (\omega\_r^2 \eta\_r) \\\\\\ + {}\_rA\_{jk} &= a\_R + i b\_r +\end{align\*} + +The first step of our **analysis procedure** can be made, as follows: + +1. Using the FRF data measured in the vicinity of the resonance \\(\omega\_r\\), choose the fixing frequency \\(\Omega\_j\\) and then calculate \\(\Delta(\omega)\\) +2. 
Plot these values on \\(\text{Re vs } \omega^2\\) and \\(\text{Im vs }\omega^2\\) plots and compute the best fit straight line in order to determine \\(m\_R(\Omega\_j)\\) and \\(m\_I(\Omega\_j)\\) + +Now it can be shown that both these straight line slopes \\(m\_R\\) and \\(m\_I\\) are simple functions of \\(\Omega\\), and we can write: +\\[ m\_R = n\_R \Omega^2 + d\_R \text{ and } m\_I = n\_I \Omega^2 + d\_I \\] +where + +\begin{equation} + \begin{aligned} + n\_R &= a\_r; \quad n\_I = -b\_r \\\\\\ + d\_R &= -b\_r(\omega\_r^2 \eta\_r) - a\_r \omega\_r^2; \quad d\_I = b\_r \omega\_r^2 - a\_r\omega\_r^2\eta\_r + \end{aligned} +\end{equation} + +Now let \\(p = n\_I/n\_R \text{ and } q = d\_I/d\_R\\), and noting that + +\begin{equation} + \begin{aligned} + \eta\_r &= \frac{q - p}{1 + pq}; \quad \omega\_r^2 = \frac{d\_R}{(p\eta\_r - 1)n\_R} \\\\\\ + a\_r &= \frac{\omega\_r^2(p\eta\_r - 1)}{(1 + p^2)d\_R}; \quad b\_r = -a\_r p + \end{aligned} +\end{equation} + +we now have sufficient information to extract estimates for the four parameters for the resonance which has been analyzed: \\(\omega\_r, \eta\_r, \text{ and } {}\_rA\_{jk} = a\_r + i b\_r\\). + +
    +
  1. Plot graphs of \\(m\_R(\Omega)\\) vs \\(\Omega^2\\) and of \\(m\_I(\Omega)\\) vs \\(\Omega^2\\) using the results from step 1., each time using a different measurement point as the fixing frequency \\(\Omega\_j\\)
  2. Determine the slopes of the best fit straight lines through these two plots, \\(n\_R\\) and \\(n\_I\\), and their intercepts with the vertical axis \\(d\_R\\) and \\(d\_I\\)
  3. Use these four quantities, and equation [eq:modal_parameters_formula](#eq:modal_parameters_formula), to determine the **four modal parameters** required for that mode
+ +This procedure which places more weight to points slightly away from the resonance region is likely to be less sensitive to measurement difficulties of measuring the resonance region. + + +#### Residuals {#residuals} + + +##### Concept of residual terms {#concept-of-residual-terms} + +We need to introduce the concept of **residual terms**, necessary in the modal analysis process to take account of those modes which we do not analyze directly but which nevertheless exist and have an influence on the FRF data we use. + +The first occasion on which the residual problem is encountered is generally at the end of the analysis of a single FRF curve, such as by the repeated application of an SDOF curve-fit to each of the resonances in turn until all modes visible on the plot have been identified. +At this point, it is often desired to construct a theoretical curve (called "**regenerated**"), based on the modal parameters extracted from the measured data, and to overlay this on the original measured data to assess the success of the curve-fit process. +Then the regenerated curve is compared with the original measurements, the result is often disappointing, as illustrated in figure [fig:residual_without](#fig:residual_without). +However, by the inclusion of two simple extra terms (the "**residuals**"), the modified regenerated curve is seen to correlate very well with the original experimental data as shown on figure [fig:residual_with](#fig:residual_with). + + +
+ Table 22: + Effects of residual terms on FRF regeneration +
+ +| ![](/ox-hugo/ewins00_residual_without.png) | ![](/ox-hugo/ewins00_residual_with.png) | +|--------------------------------------------|-----------------------------------------| +| without residual | with residuals | +| width=\linewidth | width=\linewidth | + +If we regenerate an FRF curve from the modal parameters we have extracted from the measured data, we shall use a formula of the type + +\begin{equation} + H\_{jk}(\omega) = \sum\_{r = m\_1}^{m\_2} \frac{{}\_rA\_{jk}}{\omega\_r^2 - \omega^2 + i \eta\_r \omega\_r^2} +\end{equation} + +in which \\(m\_1\\) and \\(m\_2\\) reflect that we do not always start at the first mode (\\(r = 1\\)) and continue to the highest mode (\\(r = N\\)). + +However, the equation which most closely represents the measured data is: + +\begin{equation} + H\_{jk}(\omega) = \sum\_{r = 1}^{N} \frac{{}\_rA\_{jk}}{\omega\_r^2 - \omega^2 + i \eta\_r \omega\_r^2} +\end{equation} + +which may be rewritten as + +\begin{equation} + H\_{jk}(\omega) = \left( \sum\_{r=1}^{m\_1-1} + \sum\_{r=m\_1}^{m\_2} + \sum\_{r = m\_2+1}^{N} \right) \frac{{}\_rA\_{jk}}{\omega\_r^2 - \omega^2 + i \eta\_r \omega\_r^2} +\end{equation} + +The three terms correspond to: + +1. the **low frequency modes** not identified +2. the **modes actually identified** +3. the **high frequency modes** not identified + +These three terms are illustrated on figure [fig:low_medium_high_modes](#fig:low_medium_high_modes). 
+ + + +{{< figure src="/ox-hugo/ewins00_low_medium_high_modes.png" caption="Figure 30: Numerical simulation of contribution of low, medium and high frequency modes" >}} + +From the sketch, it may be seen that within the frequency range of interest: + +- the first term tends to approximate to a **mass-like behavior** +- the third term approximates to a **stiffness effect** + +Thus, we have a basis for the residual terms and shall rewrite equation [eq:sum_modes](#eq:sum_modes): + +\begin{equation} + H\_{jk}(\omega) \simeq -\frac{1}{\omega^2 M\_{jk}^R} + \sum\_{r=m\_1}^{m\_2} \left( \frac{{}\_rA\_{jk}}{\omega\_r^2 - \omega^2 + i \eta\_r \omega\_r^2} \right) + \frac{1}{K\_{jk}^R} +\end{equation} + +where the quantities \\(M\_{jk}^R\\) and \\(K\_{jk}^R\\) are the **residual mass and stiffness** for that **particular FRF** and **chosen frequency range**. + + +##### Calculation of residual mass and stiffness terms {#calculation-of-residual-mass-and-stiffness-terms} + +First, we compute a few values of the regenerated FRF curve at the lower frequencies covered by the tests, using only the identified modal parameters. +Then, by comparing these values with those from actual measurements, we estimate a mass residual constant which, when added to the regenerated curve, brings this closely into line with the measured data. + +Then, the process is repeated at the top end of the frequency range, this time seeking a residual stiffness. +Often, the process is more effective if there is an antiresonance near either end of the frequency range which this is then used as the point of adjustment. + +The procedure outlined here may need to be repeated **iteratively** in case the addition of the stiffness residual term then upsets the effectiveness of the mass term. + +It should be noted that often there is a **physical significance to the residual terms**. 
+If the test structure is freely-supported and its rigid body modes are well below the minimum frequency of measurement, then the mass residual term will be a direct reflection of the rigid body mass and inertia properties of the structure. +The high frequency residual can represent the local flexibility at the drive point. + + +##### Residual and pseudo modes {#residual-and-pseudo-modes} + +Sometimes it is convenient to **treat the residual terms as if they were modes**. +Instead of representing each residual effect by a constant, each can be represented by a pseudo mode. +For the low frequency residual effects, this pseudo mode has a natural frequency below the lowest frequency on the measured FRF, and for the high frequency residual effects, that pseudo mode has a natural frequency which is above the highest frequency of the measured FRF. +These pseudo modes can be conveniently included in the list of modes which have been extracted by modal analysis of that FRF. + +Using pseudo modes instead of simple residual mass and stiffness terms is a more accurate way of representing the out-of-range modes. +There is one warning, however, and that is to point out that these pseudo modes are **not** genuine modes and that they cannot be used to deduce the corresponding contributions of these same modes for any other FRF curve. + + +#### Refinement of SDOF modal analysis methods {#refinement-of-sdof-modal-analysis-methods} + +In the modal analysis methods discussed above, an assumption is made that near the resonance under analysis, the effect of **all** the other modes could be represented by a constant. +When there are neighboring modes close to the one being analyzed, this assumption may not be valid. + +
+
+ +"Close" is begin loosely defined as a situation where the separation between the natural frequencies of two adjacent modes is less than the typical damping level, both measured as percentage. + +
+ +However, we can usually remove that restriction and thereby make a more precise analysis of the data. + +We can write the receptance in the frequency range of interest as: + +\begin{equation} + \begin{aligned} + H\_{jk}(\omega) &= \sum\_{s=m\_1}^{m\_2} \left( \frac{{}\_sA\_{jk}}{\omega\_s^2 - \omega^2 + i \eta\_s \omega\_s^2} \right) + \frac{1}{K\_{jk}^R}-\frac{1}{\omega^2 M\_{jk}^R} \\\\\\ + &= \left( \frac{{}\_rA\_{jk}}{\omega\_r^2 - \omega^2 + i\eta\_r \omega\_r^2} \right) \\\\\\ + &+ \left(\sum\_{\substack{s=m\_1\\s \neq r}}^{m\_2} \frac{{}\_sA\_{jk}}{\omega\_s^2 - \omega^2 + i\eta\_s \omega\_s^2} + \frac{1}{K\_{jk}^R} - \frac{1}{\omega^2 M\_{jk}^R} \right) + \end{aligned} +\end{equation} + +In the previous methods, the second term was assumed to be a constant in the curve-fit procedure for mode \\(r\\). +However, if we have good **estimates** for the coefficients which constitute the second term, for example by having already completed an SDOF analysis, we may remove the restriction on the analysis. +Indeed, suppose we take a set of measured data points around the resonance at \\(\omega\_r\\), and that we can compute the magnitude of the second term in [eq:second_term_refinement](#eq:second_term_refinement), we then subtract this from the measurement and we obtain adjusted data points that conform to a true SDOF behavior and we can use the same technique as before to obtain **improved estimates** of the modal parameters of mode \\(r\\). + +This procedure can be repeated iteratively for all the modes in the range of interest and it can significantly enhance the quality of found modal parameters for systems with **strong coupling**. 
+ + +### MDOF Modal analysis in the frequency domain (SISO) {#mdof-modal-analysis-in-the-frequency-domain--siso} + + +#### General Approach {#general-approach} + +There are a number of situations in which the SDOF approach to modal analysis is inadequate and for these there exist several alternative methods which may generally be classified as MDOF modal analysis methods. +These situations are generally those with closely coupled modes where the single mode approximation is inappropriate and those with extremely light damping for which measurements at resonance are inaccurate. + +
+
+ +Three approach to curve-fit the entire FRF in one step are considered here: + +1. a general approach to multi-mode curve-fitting +2. a method based on the rational fraction FRF formulation +3. a method particularly suited to very lightly-damped structures + +
+ + +#### Method I - General Curve Fit approach - Non-linear Least Squares (NLLS) {#method-i-general-curve-fit-approach-non-linear-least-squares--nlls} + +We shall denote the individual FRF measured data as: +\\[ H\_{jk}^m(\Omega\_l) = H\_l^m \\] +while the corresponding "theoretical" values are: + +\begin{equation} + \begin{aligned} + H\_l &=H\_{jk}(\Omega\_l) \\\\\\ + &= \sum\_{s=m\_1}^{m\_2}\frac{{}\_sA\_{jk}}{\omega\_s^2 - \Omega\_l^2 + i \eta\_s \omega\_s^2} + \frac{1}{K\_{jk}^R} - \frac{1}{\Omega\_l^2 M\_{jk}^R} + \end{aligned} +\end{equation} + +where the coefficients \\({}\_1A\_{jk}, {}\_2A\_{jk}, \dots, \omega\_1, \omega\_2, \dots, \eta\_1, \eta\_2, \dots, K\_{jk}^R \text{ and }M\_{jk}^R\\) are all to be **determined**. + +We can define an **individual error** as: + +\begin{equation} + \epsilon\_l = H\_l^m - H\_l +\end{equation} + +and express this as a scalar quantity: + +\begin{equation} + E\_l = \left| \epsilon\_l^2 \right| +\end{equation} + +If we further increase the generality by attaching a **weighting factor** \\(w\_l\\) to each frequency point of interest, then the curve fit process has to determine the values of the unknown coefficients such that the total error: + +\begin{equation} + E = \sum\_{l = 1}^p w\_l E\_l +\end{equation} + +is minimized. + +This is achieved by differentiating [eq:error_weighted](#eq:error_weighted) with respect to each unknown in turn, thus generating a set of as many equations as there are unknown: + +\begin{equation} + \frac{d E}{d q} = 0; \quad q = {}\_1A\_{jk}, {}\_2A\_{jk}, \dots +\end{equation} + +Unfortunately, this set of equations are **not linear** in many of the coefficients and thus cannot be solved directly. +It is from this point that the differing algorithms choose their individual procedures: making various simplifications, assumptions or linearizing the expressions. 
+ + +#### Method II - Rational Fraction Polynomial Method (RFP) {#method-ii-rational-fraction-polynomial-method--rfp} + +The method which has emerged as one the **standard** frequency domain modal analysis methods is that known as the **Rational Fraction Polynomial** (**RFP**) method. +This method is a special version of the general curve fitting approach but is based on a different formulation for the theoretical expression used for the FRF. + +
+
+ +\begin{subequations} +\begin{align} + H(\omega) &= \sum\_{r=1}^N \frac{A\_r}{\omega\_r^2 - \omega^2 + 2 i \omega \omega\_r \xi\_r} \label{eq:frf\_clasic} \\\\\\ + &= \frac{b\_0 + b\_1(i\omega) + \dots + b\_{2N-1}(i\omega)^{2N-1}}{a\_0 + a\_1(i\omega) + \dots + a\_{2N}(i\omega)^{2N}} \label{eq:frf\_rational} +\end{align} +\end{subequations} + +In this formulation, we have adopted the **viscous damping model**. + +
+ +The unknown coefficients \\(a\_0, \dots, a\_{2N}, b\_0, \dots, b\_{2N-1}\\) are not the modal properties but are related to them and are computed in a further stage of processing. + +The particular advantage of this approach is the possibility of formulating the curve fitting problem as a **linear set of equations**, thereby making the solution amenable to a direct matrix solution. + +We shall denote each of our measured FRF data point by \\(\hat{H}\_k\\), where \\(\hat{H}\_k = \hat{H}(\omega\_k)\\), and define the error between that measured value and the corresponding value derived from the curve-fit expression as + +\begin{equation} + e\_k = \frac{b\_0 + b\_1(i\omega\_k) + \dots + b\_{2m-1}(i\omega\_k)^{2m-1}}{a\_0 + a\_1(i\omega\_k) + \dots + a\_{2m}(i\omega\_k)^{2m}} - \hat{H}\_k +\end{equation} + +leading to the modified, but more convenient version actually used in the analysis + +\begin{equation} + \begin{aligned} + e\_k^\prime &= \left( b\_0 + b\_1(i\omega\_k) + \dots + b\_{2m-1}(i\omega\_k)^{2m-1} \right)\\\\\\ + &- \hat{H}\_k\left( a\_0 + a\_1(i\omega\_k) + \dots + a\_{2m}(i\omega\_k)^{2m} \right) + \end{aligned} +\end{equation} + +In these expressions, only \\(m\\) modes are included in the theoretical FRF formula: the true number of modes, \\(N\\), is actually one of the **unknowns** to be determined during the analysis. 
+Equation [eq:rpf_error](#eq:rpf_error) can be rewritten as follows: + +\begin{equation} + \begin{aligned} + e\_k^\prime &= \begin{Bmatrix} 1 & i \omega\_k & \dots & (i\omega\_k)^{2m-1} \end{Bmatrix} + \begin{Bmatrix} b\_0 \\ \vdots \\ b\_{2m-1} \end{Bmatrix}\\\\\\ + &- \hat{H}\_k \begin{Bmatrix} 1 & i\omega\_k & \dots & (i\omega\_k)^{2m-1} \end{Bmatrix} + \begin{Bmatrix} a\_0 \\ \vdots \\ a\_{2m-1} \end{Bmatrix}\\\\\\ + &- \hat{H}\_k (i\omega\_k)^{2m} a\_{2m} + \end{aligned} +\end{equation} + +and the \\(L\\) linear equations corresponding to \\(L\\) individual frequency points can be combined in matrix form: + +\begin{equation} +\begin{aligned} + \\{E^\prime\\}\_{L \times 1} &= [P]\_{L\times 2m} \\{b\\}\_{2m\times 1}\\\\\\ + &- [T]\_{L\times(2m+1)} \\{a\\}\_{(2m+1)\times 1}\\\\\\ + &- \\{W\\}\_{L\times 1} +\end{aligned} +\end{equation} + +Solution for the unknown coefficients \\(a\_j, \dots, b\_k, \dots\\) is achieved by minimizing the error function + +\begin{equation} + J = \\{E^\*\\}^T\\{E\\} +\end{equation} + +and this leads to + +\begin{equation} + \begin{bmatrix} + [Y] & [X] \\\\\\ + [X]^T & [Z] + \end{bmatrix}\_{L \times (4m+1)} + \begin{Bmatrix} + \\{b\\} \\ \\{a\\} + \end{Bmatrix}\_{(4m+1) \times 1} + = \begin{Bmatrix} + \\{B\\} \\ \\{F\\} + \end{Bmatrix}\_{L \times 1} +\end{equation} + +where \\([X], [Y], [Z], \\{G\\}\\) and \\(\\{F\\}\\) are known measured quantities: + +\begin{equation} + \begin{aligned} + [Y] &= \text{Re}\left( [P^\*]^T[P] \right);\quad [X] = \text{Re}\left( [P^\*]^T[T] \right); \\\\\\ + [Z] &= \text{Re}\left( [T^\*]^T[T] \right); \\\\\\ + \\{G\\} &= \text{Re}\left( [P^\*]\\{W\\} \right);\quad \\{F\\} = \text{Re}\left( [T^\*]\\{W\\} \right); + \end{aligned} +\end{equation} + +Once the solution has been obtained for the coefficients \\(a\_k, \dots , b\_k, \dots\\) then the second stage of the modal analysis can be performed in which the required **modal parameters are derived**. 
+This is usually done by solving the two polynomial expressions which form the numerator and denominator of equations [eq:frf_clasic](#eq:frf_clasic) and [eq:frf_rational](#eq:frf_rational): + +- the denominator is used to obtain the natural frequencies \\(\omega\_r\\) and damping factors \\(\xi\_r\\) +- the numerator is used to determine the complex modal constants \\(A\_r\\) + +In order to determine the order, the analysis is repeated using different assumed values for the order \\(m\\) and are compared. +For each analysis, there will be properties found for as many modes as prescribed by the chosen model order. +Some of these will be genuine modes while others will be fictitious modes. +Various strategies may be adopted to separate the fictitious and real modes: + +- measuring the difference between the original FRF curve and that regenerated using the modal properties derived +- measuring the consistency of the various modal parameters for different model order choices and eliminating those which vary widely from run to run + +In all these checks, interest is concentrated on the **repeatability** of the various modal properties: modes which reappear for all choices of data and model condition are believed to be genuine, while those which vary from run to run are more likely to have computational features due to the curve-fitting requirements as their origins, rather than physical ones. + + +#### Method III - Lightly Damped Structures {#method-iii-lightly-damped-structures} + +It is found that some structures do not provide FRF data which respond very well to the above modal analysis procedures mainly because of the difficulties encountered in acquiring good measurements near resonance. + +For such structures, it is often the case that interest is confined to an **undamped model** of the test structure since the damping in a complete structural assembly is provided mostly from the joints and not from the components themselves. 
+Thus, there is scope for an alternative method of modal analysis which is capable of providing the required modal properties, in this case **natural frequencies** and **real modal constants**, using data measured **away from the resonance regions**. + +The requirements for the analysis are as follows: + +1. measure the FRF over the frequency range of interest +2. locate the resonances and note the corresponding natural frequencies +3. select individual FRF measurement data points from as many frequencies as there are modes, plus two, confining the selection to points away from resonance +4. using the data thus gathered, compute the modal constants +5. construct a regenerated curve and compare this with the full set of measured data points + + +### Global modal analysis methods in the frequency domain {#global-modal-analysis-methods-in-the-frequency-domain} + + +#### General Approach {#general-approach} + +More recent curve fitting procedures are capable of performing a multi curve fit instead of just working with individual FRF curves. +They **fit several FRF curves simultaneously**, taking due account of the fact that the **properties of all the individual curves are related** by being from the **same structure**: +all FRF plots on a given testpiece should indicate the same values for natural frequencies and damping factor of each mode. + +Such methods have the advantage of producing a **unique and consistent model** as direct output. + +
+
+ +A way in which a set of measured FRF curves may be used collectively, rather than singly, is by the construction of a single Composite Response Function: + +\begin{equation} + \sum\_j\sum\_k H\_{jk}(\omega) = \sum\_j\sum\_k\sum\_{r=1}^N (\dots) = HH(\omega) +\end{equation} + +with +\\[ H\_{jk} = \sum\_{r=1}^n \frac{{}\_rA\_{jk}}{\omega\_r^2 - \omega^2 + i \eta\_r \omega\_r^2} \\] + +
+ +The composite function \\(HH(\omega)\\) can provide a useful means of determining a single (average) value for the natural frequency and damping factor for each mode where the individual functions would each indicate slightly different values. +As an example, a set of mobilities measured are shown individually in figure [fig:composite_raw](#fig:composite_raw) and their summation shown as a single composite curve in figure [fig:composite_sum](#fig:composite_sum). + + +
+ Table 23: + Set of measured FRF +
+ +| ![](/ox-hugo/ewins00_composite_raw.png) | ![](/ox-hugo/ewins00_composite_sum.png) | +|-------------------------------------------|-----------------------------------------| +| Individual curves | Composite curve | +| width=\linewidth | width=\linewidth | + +The global analysis methods have the disadvantages first, that the computation power required is high and second that there may be valid reasons why the various FRF curves exhibit slight differences in their characteristics and it may not always be appropriate to average them. + + +#### Global Rational Fraction Polynomial Method (GRFP) {#global-rational-fraction-polynomial-method--grfp} + +The basic Rational Fraction Polynomial (RFP) method that was described in the context of single FRF curve can be generalized to multi-FRF data. +Indeed, all the FRFs from the same structure will have identical numerator polynomials. +The number of unknown coefficients in a problem where there are \\(n\\) measured FRFs and \\(m\\) modes of vibration is of the order \\((n+1)(2m + 1)\\). 
+ + +#### Global SVD method {#global-svd-method} + +A set of FRFs with a signal reference (such as are contained within a column from the complete FRF matrix) can be referred to the underlying modal model of the structure (assumed to have viscous damping) by the equation: + +\begin{align\*} + \\{H(\omega)\\}\_k &= \begin{Bmatrix} H\_{1k}(\omega) \\ \vdots \\ H\_{nk}(\omega) \\ \end{Bmatrix}\_{n\times 1} \\\\\\ + &= [\Phi]\_{n\times N} \\{g\_k(\omega)\\}\_{N\times 1} + \\{R\_k(\omega)\\}\_{n\times 1} +\end{align\*} + +where \\(\\{R\_k(\omega)\\}\\) is a vector containing the relevant residual terms and \\(\\{g\_k(\omega)\\}\\) is defined as: +\\[ \\{g\_k(\omega)\\}\_{N\times 1} = [i \omega - s\_r]\_{N\times N}^{-1} \\{\phi\_k\\}\_{N\times 1} \\] + +Also +\\[ \\{\dot{H}(\omega)\\}\_k = [\Phi] [s\_r] \\{g\_k(\omega)\\} + \\{\dot{R}\_k(\omega)\\} \\] + +Next, we can write the following expressions + +\begin{equation} +\begin{aligned} + \\{\Delta H(\omega\_i)\\}\_k &= \\{H(\omega\_i)\\}\_k - \\{H(\omega\_{i+c})\\}\_k \\\\\\ + &\approx [\Phi] \\{\Delta g\_k(\omega\_i)\\}\_{N\times 1} \\\\\\ + \\{\Delta \dot{H}(\omega\_i)\\}\_k &\approx [\Phi] [s\_r] \\{\Delta g\_k(\omega\_i)\\}\_{N\times 1} +\end{aligned} +\end{equation} + +If we now consider data at several different frequencies \\(i = 1, 2, \dots, L\\), we can write + +\begin{equation} + \begin{aligned} + [\Delta H\_k]\_{n\times L} &= [\Phi] [\Delta g\_k]\_{N\times L} \\\\\\ + [\Delta \dot{H}\_k]\_{n\times L} &= [\Phi] [s\_r] [\Delta g\_k]\_{N\times L} + \end{aligned} +\end{equation} + +We can construct an eigenvalue problem: +\\[ \left( [\Delta \dot{H}\_k]^T - s\_r [\Delta H\_k]^T \right) \\{z\\}\_r = \\{0\\} \\] +where +\\[ [z] = [\Phi]^{+T} \\] + +If we solve \\([z] = [\Phi]^{+T}\\) using the SVD, we can determine the rank of the FRF matrices and thus the correct number of modes \\(m\\) to be identified, leading to the appropriate eigenvalues \\(s\_r;\ r=1, \dots, m\\). 
+ +Then, in order to determine the mode shapes, the modal constants can be recovered from: + +\begin{equation} +\begin{aligned} + \begin{Bmatrix} H\_{jk}(\omega\_1) \\ \vdots \\ H\_{jk}(\omega\_L) \\ \end{Bmatrix}\_{L\times 1} = &\begin{bmatrix} + (i\omega\_1 - s\_1)^{-1} & (i\omega\_1 - s\_2)^{-1} & \dots \\\\\\ + (i\omega\_2 - s\_1)^{-1} & (i\omega\_2 - s\_2)^{-1} & \dots \\\\\\ + \vdots & \dots & \dots \\\\\\ + \vdots & \dots & (i\omega\_L - s\_m)^{-1} \\\\\\ +\end{bmatrix}\\\\\\ + &\begin{Bmatrix} + {}\_1A\_{jk}\\ \vdots \\ {}\_mA\_{jk} +\end{Bmatrix}\_{m\times 1} +\end{aligned} +\end{equation} + +Using this approach, it is possible to extract a consistent set of modal parameters for the model whose FRFs have been supplied. + + +### Concluding comments {#concluding-comments} + +In the task of extracting modal model parameters from measured test data, the analyst must rely on the skill of others who have coded the various analysis algorithms since these are generally complex. +Because of this, the analyst must develop the various skills which enable him to **select the most appropriate analysis procedure** for each case and to make the best interpretation of the output of these analysis methods. + +In this chapter, we have first highlighted the **need for accuracy and reliability in the measured data** that is the source of a modal analysis. +If these data are not of high quality, the resulting modal model cannot be expected to be any better. +Thus, attention must be paid at the initial phases to ascertain and to assure the necessary quality of the raw data. +Question as to the **correct order for the model** and the **most appropriate model for damping** are often foremost among these early interpretations. 
+ +A hierarchy of different types of modal analysis procedure have been cataloged, from the simple SDOF one-mode-at-a-time for a single response function, through MDOF methods which reveal several modes at a time, to global analysis methods where several modes are extracted simultaneously from several FRFs. + + +## Derivation of Mathematical Models {#derivation-of-mathematical-models} + + +### Introduction {#introduction} + +We consider now the derivation of a mathematical model to describe the dynamic behavior of the test structure. +Various types of model exists and are suitable in different cases. +The most important aspect of the modeling process is to **decide exactly which type of model we should seek** before setting out on the acquisition and processing of experimental data. + +Three main categories of model are identified: + +- **Spatial model**: mass, stiffness and damping +- **Modal model**: natural frequencies, mode shapes +- **Response model**: frequency response functions + +There exist **complete models** of each type and the more realistic **incomplete models** we are obliged to consider in practical cases. + +The three types of model are usually derived as \\(\text{Spatial}\Rightarrow\text{Modal}\Rightarrow\text{Response}\\) for theoretical analysis, and conversely, as \\(\text{Response}\Rightarrow\text{Modal}\Rightarrow\text{Spatial}\\) for an experimental study. +We may now view them in a different order, according to the facility with which each may be derived from the test data: Modal, Response and then Spatial. +This reflects the quantity of the completeness of data required in each case. + +
+
+ +A modal model can be constructed using just one single mode, and including only a handful of degrees of freedom, even though the structure has many modes and many DOFs. +Such a model can be built up by adding data from more modes, but it is not a requirement that **all** the modes should be included nor even that all the modes in the frequency range of interest be taken into account. +Thus such a model may be derived with relatively few, or equally, with many data. + +
+ +
+
+ +The response type of model in the form of a FRF matrix, such as the mobility matrix, also needs only to include information concerning a limited number of point of interest: not all the DOFs need be considered. +However, in this case, it is generally required that the model be valid over a specified frequency range, and here it is necessary that all the modes in that range be included. +Also, some account should be taken of the modes whose natural frequencies lie outside of the range of interest to allow for the residual effects. +Thus, the response type of model demands more data to be collected from the tests. + +
+ +
+
+ +A representative spatial model can only be obtained if we have measured most of the modes of the structure and if we have made measurements at a great many of the DOFs it possesses. +This is generally a very demanding requirement to meet, and as a result, the derivation of a spatial model from test data is very difficult to achieve. + +
+ +This chapter is organized with the following structure: + +1. We shall describe what data must be measured in order to construct a suitable model and what checks can be made to access the reliability of the model. +2. We shall discuss a number of techniques for "**refining**" the model which is obtained from the test so that it matches a number of features of the analytical model. + For instance, it is common to extract complex mode shapes from the test data on real structures but the analytical models are usually undamped so that their modes are real. +3. We may wish to expand our experimental model, or, alternatively, reduce the theoretical ones so that the two models which are to be compared are at least of the same order. +4. We shall explore some of the properties of the models which can be derived by the means described here. + + +### Modal models {#modal-models} + + +#### Requirements to construct modal model {#requirements-to-construct-modal-model} + +A **modal model** of a structure consists of **two matrices**: + +- one containing the **natural frequencies** and **damping factors**: the eigenvalues +- one which describes the **shapes of the corresponding modes**: the eigenvectors + +Thus, we can construct such a model with just a single mode, and a more complete model is assembled simply by **adding together a set of these single-mode descriptions**. + +The basic method of deriving a modal model is as follows. +First, we note that from a single FRF curve, \\(H\_{jk}(\omega)\\), it is possible to extract certain modal properties for the \\(r^\text{th}\\) mode by modal analysis: + +\begin{equation} + H\_{jk}(\omega) \longrightarrow \omega\_r, \eta\_r, {}\_rA\_{jk}; \quad r=1, m +\end{equation} + +Now, although this gives us the natural frequency and damping properties directly, it does not explicitly yield the mode shape: only a modal constant \\({}\_rA\_{jk}\\) which is formed from the mode shape data. 
+In order to extract the individual elements \\(\phi\_{jr}\\) of the mode shape matrix \\([\Phi]\\), it is necessary to make a series of measurements of specific FRFs including, especially, the point FRF at the excitation position. +If we measure \\(H\_{kk}\\), then by using [eq:modal_model_from_frf](#eq:modal_model_from_frf), we also obtain the specific elements in the mode shape matrix corresponding to the excitation point: + +\begin{equation} + H\_{kk}(\omega) \longrightarrow \omega\_r, \eta\_r, {}\_rA\_{kk} \longrightarrow \phi\_{kr}; \quad r=1, m +\end{equation} + +If we then measure an associated transfer FRF using the same excitation position, such as \\(H\_{jk}\\), we are able to deduce the mode shape element corresponding to the new response point \\(\phi\_{jr}\\) using the fact that the relevant modal constants may be combined with those from the point measurement: + +\begin{equation} + \tcmbox{\phi\_{jr} = \frac{{}\_rA\_{jk}}{\phi\_{kr}}} +\end{equation} + +Hence, we find that in order to derive a modal model referred to a particular set of \\(n\\) coordinates, we need to measure and analyze a set of \\(n\\) FRF curves, **all sharing the same excitation point** and thus constituting one point FRF and \\((n-1)\\) transfer FRFs. +In terms of the complete FRF matrix, this corresponds to measuring the individual FRFs of **one entire column**. +It is also possible to measure one row of the FRF matrix. This corresponds to a set of \\(n\\) FRF curves sharing the same measurement point and varied excitation point. + +Often, several additional elements from the FRF matrix would be measured to provide a check, or to replace poor data, and sometimes measurement of a complete **second column** or row might be advised in order to ensure that one or more modes have not been missed by an unfortunate choice of exciter location. 
+Indeed, if the exciter is placed at a nodal point of one of the modes, then there would be no indications at all of the existence of that mode because every modal constant would be zero for that mode. +It may then require more than one measurement to confirm that we are not exciting the structure at a nodal point. + +Once all the selected FRF curves have been measured and individually analyzed, we obtain a set of modal properties containing **more data than needed**: + +- we may have determined many separate estimates for the natural frequencies and damping factors as these parameters are extracted from each FRF curve +- in the event we have measured more than one row or one column for the FRF matrix, we also obtain separate estimates for the mode shapes + +The simplest procedure is to average all the individual estimates, resulting in mean values \\(\tilde{\omega}\_r\\) and \\(\tilde{\eta}\_r\\). +In practice, not all the estimates should carry equal weight because some would probably be derived from much more satisfactory curve fits than others. +A refined procedure would be to calculate a **weighted mean** of all the estimates using the quality factor obtained from the curve-fit procedure. + +If we choose to accept a revised value for \\(\omega\_r\\) and \\(\eta\_r\\) of a particular mode, then the value for the modal constant should also be revised: + +\begin{equation} + {}\_r\tilde{A}\_{jk} = {}\_rA\_{jk}\frac{\tilde{\omega}\_r^2 \tilde{\eta}\_r}{\omega\_r^2 \eta\_r} +\end{equation} + +The final reduced model obtained consists of the two matrices which constitute a modal model, namely: +\\[ \left[ \omega\_r^2(1 + i\eta\_r) \right]\_{m\times m};\quad \left[ \Phi \right]\_{n\times m} \\] + + +#### Double modes or repeated roots {#double-modes-or-repeated-roots} + +When a structure has two modes that are very close in frequency, it may be impossible to derive a true model for the structure. 
+All we can define in these circumstances is a single equivalent mode which is, in fact, a combination of the two actual modes that are difficult to identify individually. + +However, single equivalent modes can lead to erroneous models and it is very important that we can detect the presence of double modes and that we can identify all the modes which are present. + +The only way repeated modes can be detected and identified in a modal test is by using data from more than one reference. +This means that we must measure FRF data from **more than a single row or column** (as many rows/columns as there are repeated roots). + + +#### Constructing models of NSA structures {#constructing-models-of-nsa-structures} + +Structures which are classified as Non-Self-Adjoint (NSA) have **non-symmetric mass**, **stiffness** or **damping matrices**. +This often occurs in structures with rotating components. +As a result, we cannot take advantage of the symmetry of the system matrices and just measure a single row or column of the FRF matrix. + +In the case of NSA structures, we are required to measure and analyze the elements in **both a row and a column of the FRF matrix**. +A mathematical explanation is that this class of systems has two types of eigenvectors (left-hand and right-hand) and thus there are twice as many eigenvector elements to identify. + + +#### Quality checks for modal models {#quality-checks-for-modal-models} + +It is important to check the **reliability** of the obtained results. +There are two such checks that can be recommended for this phase of the process. + +First, it is possible to **regenerate FRFs** from the modal model. +These FRFs can be compared with measured data that has been used for the modal analysis. +Furthermore, it is also possible to synthesize FRFs that have not yet been measured (and thus not used for the model), and then to measure the corresponding FRF on the structure and to compare. 
+This test provides a powerful demonstration of the validity of the modal model. + +A second, more demanding but also more convincing, demonstration of the validity of the modal model is to use the model to predict how the dynamic properties of the test structure will change if it is subjected to a small structural modification, such as can be occasioned by adding a small mass at a selected point. +Then such a modification can be made to the real structure, measurements taken, and the results compared with the predictions of the modified model. + + +### Refinement of modal models {#refinement-of-modal-models} + + +#### Need for model refinement {#need-for-model-refinement} + +Several differences exist between most test-derived models and analytical models that make their comparison difficult. + +The first difference concerns the **mode shapes**: + +- **test-derived**: generally complex +- **analytical**: usually real if we use an undamped model + +Objective comparison between complex mode shapes and real mode shapes is then not possible and some refinement of one of the two sets is required. + +A second incompatibility lies in the difference in the **order of the models**: + +- **test-derived**: relatively small order given by the number of measured DOFs \\(n\\) +- **analytical**: generally an order of magnitude greater than \\(n\\) + +There is then a desire to **refine** one or other model to **bring them both to the same size** for meaningful comparison. + +However, all the refinements involve **approximations** which means that a compromise has been made in order to achieve the greater degree of compatibility which is desired. + + +#### Complex-to-real conversion {#complex-to-real-conversion} + +As we usually don't know the nature, extent and distribution of damping present in the system, the analytical model is chosen to be **undamped**. 
+We wish here to be able to determine what would be the mode shapes of the tested structure if, by some means, we could remove the damping but leave everything else the same. +Then we should be able to compare the modes. + + +##### Simple method {#simple-method} + +This simple method is to convert the mode shape vectors from complex to real by taking the modulus of each element and by assigning a phase to each of \\(\SI{0}{\degree}\\) or \\(\SI{180}{\degree}\\). + +Any phase angle of a complex mode shape element which is between \\(\SI{-90}{\degree}\\) and \\(\SI{90}{\degree}\\) is set to \\(\SI{0}{\degree}\\), while those between \\(\SI{90}{\degree}\\) and \\(\SI{270}{\degree}\\) are set to \\(\SI{180}{\degree}\\). +This procedure can become difficult to apply in borderline cases when the phase is poorly defined. + + +##### Multi point excitation - Asher's method {#multi-point-excitation-asher-s-method} + +In this method, the test-derived model based on complex modes is used to synthesize the response that would be produced by the application of several simultaneous harmonic forces in order to establish what those forces would need to be in order to produce a mono-modal response vector. + +If the optimum set of excitation forces for a given mode can be found, then they represent the forces that are actually being generated by the damping in the system at resonance of that mode. +We can then deduce the dynamic properties of the structure with these effects removed. + +The sequence of steps required to determine this solution is as follows: + +1. Compute \\([\alpha(\omega)]\\) from the complex modal model +2. Determine the undamped system natural frequencies \\(\omega\_r\\) by solving the equation \\(\det|\text{Re}[\alpha(\omega)]|=0\\) +3. Calculate the mono-phase vector for each mode of interest using \\(\text{Re}[\alpha(\omega)]\\{\hat{F}\\} = \\{0\\}\\) +4. 
Calculate the undamped system mode shapes \\(\\{\psi\_u\\}\\) using the just-derived force vector: \\(\\{\psi\_u\\} = \text{Im}[\alpha(\omega)]\\{\hat{F}\\}\\) + + +##### Matrix transformation {#matrix-transformation} + +We here seek a numerical solution to the expression linking the known damped modes and the unknown undamped modes. +The steps are: + +1. Assume that \\(\text{Re}[T\_1]\\) is unity and calculate \\(\text{Im}[T\_1]\\) from + \\[ \text{Im}[T\_1] = -[\text{Re}[\phi\_d]]^T [\text{Re}[\phi\_d]]^{-1} [\text{Re}[\phi\_d]]^T \text{Im}[\phi\_d] \\] +2. calculate \\([M\_1]\\) and \\([K\_1]\\) from + \\[ [M\_1] = [T\_1]^T[T\_1]; \quad [K\_1] = [T\_1]^T[\lambda^2][T\_1] \\] +3. Solve the eigen-problem formed by \\([M\_1]\\) and \\([K\_1]\\) leading to + \\[ [\omega\_r^2]; \quad [T\_2] \\] +4. Calculate the real modes using + \\[ [\phi\_u] = [\phi\_d][T\_1][T\_2] \\] + + +#### Expansion of models {#expansion-of-models} + +
+
+ +An important model refinement is called **expansion** and consist of the addition to the actually measured modal data of estimates for selected DOFs which were not measured for one reason or another. + +
+ +Prior to conducting each modal test, decisions have to be made as to **which of the many DOFs** that exist on the structure **will be measured**. +These decisions are made for various practical reasons: + +- Limited test time +- Inaccessibility of some DOFs +- Anticipated low importance of motion in certain DOFs + +**Three approaches** to the expansion of measured modes will be mentioned here: + +1. **Geometric interpolation** using spline functions +2. Expansion using the analytical model's **spatial properties** +3. Expansion using the analytical model's **modal properties** + +
+
+ +In all three approaches, we are in effect seeking a transformation matrix \\([T]\\) that allows us to construct a long eigenvector \\(\\{\phi\\}\_{N\times 1}\\) from knowledge of a short (incomplete) one \\(\\{\phi\\}\_{n\times 1}\\): +\\[ \\{\phi\\}\_{N\times 1} = [T]\_{N\times n} \\{\phi\\}\_{n\times 1} \\] + +
+ + +##### Interpolation {#interpolation} + +Simple interpolation has a limited range of application and can only be used on structures which have **large regions of relatively homogeneous structure**: those with joints or abrupt changes are much less likely to respond to this form of expansion. + +The method is simply geometric interpolation between the measured points themselves, such as by fitting a polynomial function through the measured points. + + +##### Expansion using theoretical spatial model - Kidder's method {#expansion-using-theoretical-spatial-model-kidder-s-method} + +This interpolation uses a **theoretical model's mass and stiffness matrices** in a form of an inverse Guyan reduction procedure. + +If we **partition** the eigenvector of interest, \\(\\{\phi\_A\\}\_r\\), into: + +- the DOFs to be included: \\(\\{{}\_A\phi\_1\\}\_r\\) +- the DOFs which are not available from the measurements: \\(\\{{}\_A\phi\_2\\}\_r\\) + +then we may write: + +\begin{equation\*} + \left( \begin{bmatrix} + {}\_AK\_{11} & {}\_AK\_{12}\\\\\\ + {}\_AK\_{21} & {}\_AK\_{22} +\end{bmatrix} - \omega\_r^2 \begin{bmatrix} + {}\_AM\_{11} & {}\_AM\_{12}\\\\\\ + {}\_AM\_{21} & {}\_AM\_{22} +\end{bmatrix} \right) \begin{Bmatrix}{}\_A\phi\_1\\\{}\_A\phi\_2\end{Bmatrix} = \\{0\\} +\end{equation\*} + +We can use this relationship between the measured and unmeasured DOFs as the basis for an expansion of the incomplete measured mode shapes: +\\[ \\{{}\_A\phi\_2\\}\_r = [T\_{21}]\\{{}\_A\phi\_1\\}\_r \\] +with +\\[ [T\_{21}] = - \left( [{}\_AK\_{22}] - \omega\_r^2[{}\_AM\_{22}] \right)^{-1} \left( [{}\_AK\_{21}] - \omega\_r^2[{}\_AM\_{21}] \right) \\] + +The relation between the incomplete measured vector to the complete expanded vector is then + +\begin{equation} + \tcmbox{ \\{\tilde{\phi}\_X\\}\_r = \begin{Bmatrix} + {}\_X\phi\_1 \\ {}\_X\tilde{\phi}\_2 +\end{Bmatrix} = \begin{bmatrix} + [I] \\ [T\_{21}] +\end{bmatrix} \\{{}\_X\phi\_1\\}\_r } +\end{equation} + + +##### Expansion using 
analytical model mode shapes {#expansion-using-analytical-model-mode-shapes} + +This method uses the analytical model for the interpolation, but is based on the **mode shapes derived from the analytical modal spatial matrices**, rather than on these matrices themselves. + +We may write the following expression which relates the experimental model mode shapes to those of the analytical model: + +\begin{equation\*} + \begin{Bmatrix} + {}\_X\phi\_1 \\ {}\_X\phi\_2 +\end{Bmatrix} = \begin{bmatrix} + [{}\_A\Phi\_{11}] & [{}\_A\Phi\_{12}] \\\\\\ + [{}\_A\Phi\_{21}] & [{}\_A\Phi\_{22}] +\end{bmatrix} \begin{Bmatrix} + \gamma\_1 \\ \gamma\_2 +\end{Bmatrix}\_r +\end{equation\*} + +The basic of this method is to assume that the measured mode shape submatrix can be represented exactly by the simple relationship (which assumes that \\(\\{\gamma\_2\\}\_r\\) can be taken to be zero): + +\begin{equation} + \\{{}\_X\phi\_1\\}\_r = [{}\_A\Phi\_{11}] \\{\gamma\_1\\}\_r +\end{equation} + +so that an estimate can be provided for the unmeasured part of the eigenvector from + +\begin{equation} + \begin{aligned} + \\{{}\_X\tilde{\phi}\_2\\} &= [T\_{21}] \\{{}\_X\phi\_1\\}\_r \\\\\\ + &= [{}\_A\Phi\_{21}][{}\_A\Phi\_{11}]^{-1} \\{{}\_X\phi\_1\\}\_r + \end{aligned} +\end{equation} + +Thus, we can write the full transformation as: + +\begin{equation\*} + \tcmbox{ \\{\tilde{\phi}\_X\\}\_r = \begin{Bmatrix} + {}\_X\phi\_1 \\ {}\_X\tilde{\phi}\_2 +\end{Bmatrix} = \begin{bmatrix} + [{}\_A\Phi\_{11}] \\ [{}\_A\Phi\_{21}] +\end{bmatrix} [{}\_A\Phi\_{11}]^{-1} \\{{}\_X\phi\_1\\}\_r } +\end{equation\*} + +This formula can be generalized to a single expression which covers several measured modes: + +\begin{equation\*} + \tcmbox{[\tilde{\Phi}\_X]\_{N\times m\_X} = \underbrace{[\Phi\_A]\_{N\times m\_A} [{}\_A\Phi\_{11}]\_{m\_A \times n}^+}\_{[T]\_{N\times n}} [{}\_A\Phi\_1]\_{n\times m\_X}} +\end{equation\*} + +where \\(m\_X\\) and \\(m\_A\\) are the number of experimental and analytical modes used, 
respectively. + +Other formulations for \\([T]\\) are possible, they involve various combinations of the available experimental mode shape data and those from the analytical model: + +\begin{equation} + \begin{aligned} + [T\_{(1)}] &= [\Phi\_A][{}\_A\Phi\_1]^+ & \text{A model - based} \\\\\\ + [T\_{(2)}] &= [\Phi\_A][{}\_X\Phi\_1]^+ & \text{X model - based} \\\\\\ + [T\_{(3)}] &= \begin{bmatrix}{}\_X\Phi\_1\\\{}\_A\Phi\_2\end{bmatrix}[{}\_A\Phi\_1]^+ & \text{Mixed/A - based} \\\\\\ + [T\_{(4)}] &= \begin{bmatrix}{}\_X\Phi\_1\\\{}\_A\Phi\_2\end{bmatrix}[{}\_X\Phi\_1]^+ & \text{Mixed/X - based} + \end{aligned} +\end{equation} + +It must be pointed out that all the above formula are approximate because of the initial assumption that the higher modes are not required to be included in the process (that \\(\\{\gamma\_2\\}\\) is zero). + + +#### Reduction of models {#reduction-of-models} + +The model reduction, which is the inverse of the expansion process, is used when it is decided to obtain compatibility between two otherwise disparate models by reducing the size of the larger of the two models (almost always, the analytical model). + +Model reduction has less importance nowadays as computing power is widely available and because such reduction introduces approximations. + +There are basically **two different types of model reduction**, both of which are applied to the spatial model (as opposed to the modal model as is the case in model expansion), and both achieve the same end result of yielding a smaller order model, with system matrices which are \\(n\times n\\) instead of \\(N\times N\\): + +1. a **condensed model** which seeks to represent the entire structure completely at a smaller number of DOFs. This type of model introduces approximation. +2. a **reduced model** which has removed information related to the DOFs that are eliminated from the model, and which is thus an incomplete model. However, for the retained DOFs, no information is lost. 
+ +Let's summarize the basic feature of model reduction by **condensation**. +The basic equation of motion for the original model can be expressed as: +\\[ [M] \ddot{x} + [K]\\{x\\} = \\{f\\} \\] +and this can be partitioned into the **kept DOFs** \\(\\{x\_1\\}\\) and the **eliminated DOFs** \\(\\{x\_2\\}\\) (which by definition cannot have any excitation forces applied to them): + +\begin{equation\*} +\begin{bmatrix} + M\_{11} & M\_{12} \\\\\\ + M\_{21} & M\_{22} +\end{bmatrix} \begin{Bmatrix} + \ddot{x}\_1 \\ \ddot{x}\_2 +\end{Bmatrix} + \begin{bmatrix} + K\_{11} & K\_{12} \\\\\\ + K\_{21} & K\_{22} +\end{bmatrix} \begin{Bmatrix} + x\_1 \\ x\_2 +\end{Bmatrix} = \begin{Bmatrix} + f\_1 \\ 0 +\end{Bmatrix} +\end{equation\*} + +A relationship between the kept and eliminated DOFs can be written in the form: + +\begin{equation} + \begin{Bmatrix} + x\_1 \\ x\_2 +\end{Bmatrix}\_{N\times 1} = \begin{bmatrix} + [I] \\ [T] +\end{bmatrix}\_{N\times n} \\{x\_1\\}\_{n\times 1} +\end{equation} + +where the transformation matrix \\([T]\\) can be defined by + +\begin{equation\*} + [T] = (1 - \beta)\left(-[K\_{22}]^{-1}[K\_{21}]\right) + \beta\left(-[M\_{22}]^{-1}[M\_{21}]\right) +\end{equation\*} + +in which \\(\beta\\) is a reduction coefficient whose limiting values are \\(\beta = 0\\) for **static reduction** and \\(\beta = 1\\) for **dynamic reduction**. 
+ +The **reduced mass and stiffness matrices** which are produced by this process are: + +\begin{align\*} + \left[M^R\right]\_{n\times n} &= \begin{bmatrix}[I] & [T]^T\end{bmatrix}\_{n\times N} \begin{bmatrix} + M\_{11} & M\_{12} \\ M\_{21} & M\_{22} +\end{bmatrix}\_{N\times N} \begin{bmatrix} + [I] \\ [T] +\end{bmatrix}\_{N\times n}\\\\\\ + \left[K^R\right]\_{n\times n} &= \begin{bmatrix}[I] & [T]^T\end{bmatrix}\_{n\times N} \begin{bmatrix} + K\_{11} & K\_{12} \\ K\_{21} & K\_{22} +\end{bmatrix}\_{N\times N} \begin{bmatrix} + [I] \\ [T] +\end{bmatrix}\_{N\times n} +\end{align\*} + +The two limiting cases of static and dynamic reduction are of particular interest. +In each case, one of the two system matrices is unchanged and the other one is: + +\begin{align\*} + \beta = 1:\ &[M^{R\text{dynamic}}] = [M\_{11}] + [M\_{12}] \left(-[M\_{22}]^{-1}[M\_{21}]\right)\\\\\\ + &[K^{R\text{dynamic}}] = [K]\\\\\\ + \beta = 0:\ &[M^{R\text{static}}] = [M]\\\\\\ + &[K^{R\text{static}}] = [K\_{11}] + [K\_{12}] \left(-[K\_{22}]^{-1}[K\_{21}]\right) +\end{align\*} + +These reduction procedures can provide useful approximate models of the structure if an optimum choice of which DOFs to retain and which can be eliminated is made. +However, a reduced theoretical model of this type does not correspond to a similarly low-order model which is obtained from experiments since that is formed simply by ignoring the eliminated DOFs. +The measured data for the included DOFs are the same no matter how many DOFs are eliminated. +Thus, there are inherent difficulties involved in using this mixture of condensed (but complete) theoretical models and reduced (but incomplete) experimental models. + + +### Display of modal models {#display-of-modal-models} + +One of the attractions of the modal model is the possibility of obtaining a **graphic display** of its form by plotting the mode shapes. 
+ +There are basically two choices for the graphical display of a modal model: + +- a **static plot** +- a **dynamic** (animated) **display** + + +#### Static Displays {#static-displays} + + +##### Deflected shapes {#deflected-shapes} + +A static display is often adequate for depicting relatively simple mode shapes. +Measured coordinates of the test structure are first linked as shown on figure [fig:static_display](#fig:static_display) (a). +Then, the grid of measured coordinate points is redrawn on the same plot but this time displaced by an amount proportional to the corresponding element in the mode shape vector as shown on figure [fig:static_display](#fig:static_display) (b). +The elements in the vector are scaled according the normalization process used (usually mass-normalized), and their absolute magnitudes have no particular significance. + + + +{{< figure src="/ox-hugo/ewins00_static_display.png" caption="Figure 31: Static display of modes shapes. (a) basic grid (b) single-frame deflection pattern (c) multiple-frame deflection pattern (d) complex mode (e) Argand diagram - quasi-real mode (f) Argand diagram - complex mode" >}} + +It is customary to select the largest eigenvector element and to scale the whole vector by an amount that makes that displacement on the plot a viable amount. + + +##### Multiple frames {#multiple-frames} + +If a series of deflection patterns that has been computed for a different instant of time are superimposed, we obtain a result as shown on figure [fig:static_display](#fig:static_display) (c). +Some indication of the motion of the structure can be obtained, and the points of zero motion (nodes) can be clearly identified. + +It is also possible, in this format, to give some indication of the essence of complex modes, as shown in figure [fig:static_display](#fig:static_display) (d). +Complex modes do not, in general, exhibit fixed nodal points. 
+ + +##### Argand diagram plots {#argand-diagram-plots} + +Another form of representation which is useful for complex modes is the representation of the individual complex elements of the eigenvectors on a polar plot, as shown in the examples of figure [fig:static_display](#fig:static_display) (e) and (f). +Although there is no attempt to show the physical deformation of the actual structure in this format, the complexity of the mode shape is graphically displayed. + + +#### Dynamic Display {#dynamic-display} + +The coordinates for the basic picture are computed and stored for multiple fractions of a cycle. +Then, 10 to 20 frames are stored and displayed with an update rate which is suitable to give a clear picture of the distortion of the structure during vibration. + +The dynamic character of animation is the only really effective way to view modal complexity and is very useful to display complex modes. + + +#### Interpretation of mode shape displays {#interpretation-of-mode-shape-displays} + +There are a number of features associated with mode shape displays that warrant a mention in the context of ensuring that the **correct interpretation** is made from viewing these displays. + +The first concerns the consequences of viewing an **incomplete model**. +In that case, there are no mode shape data from some of the points which comprise the grid which outlines the structure, and the indicated result is zero motion of those DOFs and this can be very **misleading**. +For instance, if we measure the displacement of grid points in only one direction, \\(x\\) for instance, then the shape display will show significant x-direction motion of those points with no motion in the other transverse directions. +We then tend to interpret this as a motion which is purely in the x-direction which may be clearly not true. 
+ +The second problem arises when the **grid of measurement points** that is chosen to display the mode shapes is **too coarse in relation to the complexity of the deformation patterns** that are to be displayed. +This can be illustrated using a very simple example: suppose that our test structure is a straight beam, and that we decide to use just three response measurements points. +If we consider the first six modes of the beam, whose mode shapes are sketched in figure [fig:beam_modes](#fig:beam_modes), then we see that with this few measurement points, modes 1 and 5 look the same as do modes 2, 4 and 6. +All the higher modes will be indistinguishable from these first few. +This is a well known problem of **spatial aliasing**. + + + +{{< figure src="/ox-hugo/ewins00_beam_modes.png" caption="Figure 32: Misinterpretation of mode shapes by spatial aliasing" >}} + + +### Response models {#response-models} + +
+
+ +There are two main requirements demanded for a response model: + +- the capability of regenerating "theoretical" curves for the FRFs actually measured and analyzed +- synthesizing the other response functions which were not measured + +
+ +In general, the form of response model with which we are concerned is an **FRF matrix** whose order is dictated by the number of coordinates \\(n\\) included in the test. +Also, as explained, it is normal in practice to measure and to analyze just a **subset of the FRF matrix** rather than to measure the full FRF matrix. +Usually **one column** or **one row** with a few additional elements are measured. +Thus, if we are to construct an acceptable response model, it will be necessary to synthesize those elements which have not been directly measured. +However, in principle, this need present no major problem as it is possible to compute the full FRF matrix from a modal model using: + +\begin{equation} + \tcmbox{[H]\_{n\times n} = [\Phi]\_{n\times m} [\lambda\_r^2 - \omega^2]\_{m\times m}^{-1} [\Phi]\_{m\times n}^T} +\end{equation} + + +#### Regenerated FRF curves {#regenerated-frf-curves} + +It is usual practice to regenerate an FRF curve using the results from the modal analysis as a means of **checking the success of that analysis**. + +It should be noted that in order to construct an acceptable response model, it is essential that all the modes in the frequency range of interest be included, and that suitable residual terms are added to take account of out-of-range modes. +In this respect, the demands of the response model are more stringent than those of the modal model. + + +#### Synthesis of FRF curves {#synthesis-of-frf-curves} + +One of the implications of equation [eq:regenerate_full_frf_matrix](#eq:regenerate_full_frf_matrix) is that **it is possible to synthesize the FRF curves which were not measured**. +This arises because if we measured three individual FRFs such as \\(H\_{ik}(\omega)\\), \\(H\_{jk}(\omega)\\) and \\(H\_{kk}(\omega)\\), then modal analysis of these yields the modal parameters from which it is possible to generate the FRF \\(H\_{ij}(\omega)\\), \\(H\_{jj}(\omega)\\), etc. 
+ +However, it must be noted that there is an important **limitation to this procedure** which is highlighted in the example below. + +
+
+ +As an example, suppose that FRF data \\(H\_{11}\\) and \\(H\_{21}\\) are measured and analyzed in order to synthesize the FRF \\(H\_{22}\\) initially unmeasured. +The predicted curve is compared with the measurements on figure [fig:H22_without_residual](#fig:H22_without_residual). +Clearly, the agreement is poor and would tend to indicate that the measurement/analysis process had not been successful. +However, the synthesized curve contained only those terms relating to the modes which had actually been studied from \\(H\_{11}\\) and \\(H\_{21}\\) and this set of modes did not include **all** the modes of the structure. +Thus, \\(H\_{22}\\) **omitted the influence of out-of-range modes**. +The inclusion of these two additional terms (obtained here only after measuring and analyzing \\(H\_{22}\\) itself) resulted in the greatly improved predicted vs measured comparison shown in figure [fig:H22_with_residual](#fig:H22_with_residual). 
+ + +
+ Table 24: + Synthesized FRF plot +
+ +| ![](/ox-hugo/ewins00_H22_without_residual.png) | ![](/ox-hugo/ewins00_H22_with_residual.png) | +|--------------------------------------------------------|-----------------------------------------------------------| +| Using measured modal data only | After inclusion of residual terms | +| width=\linewidth | width=\linewidth | + +The appropriate expression for a "correct" response model, derived via a set of modal properties is thus + +\begin{equation} + [H] = [\Phi] [\lambda\_r^2 - \omega^2]^{-1} [\Phi]^T + [\text{Res}] +\end{equation} + +In order to obtain all the data necessary to form such a model, we must first derive the modal model on which it is based and then find some means of **determining the elements in the residual matrix** \\([\text{Res}]\\). +This latter task may be done in several ways: + +- It may be most accurately achieved by **measuring all** (or at least over half) **of the elements in the FRF matrix**, but this would increase a lot the quantity of data to be measured. +- **Extend the frequency range of the modal test** beyond that over which the model is eventually required. + In this way, much of the content of the residual terms is included in separate modes and their actual magnitudes can be reduced to relatively unimportant dimensions. +- Try to access **which of the many FRF elements are liable to need large residual terms** and to make sure that these are included in the list of those which are measured and analyzed. + We noted earlier that it is the point mobilities which are expected to have the highest-valued residuals and the remote transfers which will have the smallest. + Thus, the significant terms in the \\([\text{Res}]\\) matrix will generally be grouped close to the leading diagonal, and this suggests **making measurements of most of the point mobility parameters**. 
+ + +#### Direct measurement {#direct-measurement} + +It should be noted that it is quite possible to develop a response model by measuring and analyzing all the elements in one half of the FRF matrix (this being symmetric) and by storing the results of this process without constructing a modal model. +This procedure clearly solves the residual problem discussed above, but it will introduce **inconsistencies** into the model which renders it unsatisfactory. + + +#### Transmissibilities {#transmissibilities} + +One vibration parameter which has not been mentioned so far is that of **transmissibility**. +This is a quantity which is quite widely used in vibration engineering practice to indicate the relative vibration levels between two points. + +In general, transmissibility is considered to be a frequency dependent response function \\(T\_{jk}(\omega)\\) which defines the ratio between the response levels at two DOFs \\(j\\) and \\(k\\). +Simply defined, we can write: + +\begin{equation} + T\_{jk} (\omega) = \frac{X\_j e^{i\omega t}}{X\_k e^{i\omega t}} +\end{equation} + +but, in fact, we need also to **specify the excitation conditions that give rise to the two responses** in question and these are missing from the above definition which is thus not rigorous. +It does not give us enough information to be able to reproduce the conditions which have been used to measure \\(T\_{jk}(\omega)\\). + +
+
+ +If the transmissibility is measured during a modal test which has a single excitation, say at DOF \\(i\\), then we can define the transmissibility thus obtained more precisely: + +\begin{equation} + {}\_iT\_{jk}(\omega) = \frac{H\_{ji}(\omega)}{H\_{ki}(\omega)} +\end{equation} + +
+ +In general, the transmissibility **depends significantly on the excitation point** (\\({}\_iT\_{jk}(\omega) \neq {}\_qT\_{jk}(\omega)\\) where \\(q\\) is a different DOF than \\(i\\)) and it is shown on figure [fig:transmissibility_plots](#fig:transmissibility_plots). +This may explain why transmissibilities are not widely used in modal analysis. + + + +{{< figure src="/ox-hugo/ewins00_transmissibility_plots.png" caption="Figure 33: Transmissibility plots" >}} + + +#### Base excitation {#base-excitation} + +The one application area where transmissibilities can be used as part of modal testing is in the case of **base excitation**. +Base excitation is a type of test where the input is measured as a response at the drive point \\(x\_0(t)\\), instead of as a force \\(f\_1(t)\\), as illustrated in figure [fig:base_excitation_configuration](#fig:base_excitation_configuration). + +We can show that it is possible to determine, from measurements of \\(x\_i\\) and \\(x\_0\\), modal properties of natural frequency, damping factor and **unscaled** mode shape for each of the modes that are visible in the frequency range of measurement. +The fact that the excitation force is not measured is responsible for the lack of formal scaling of the mode shapes. + + +
+ Table 25: + Base excitation configuration +
+ +| ![](/ox-hugo/ewins00_conventional_modal_test_setup.png) | ![](/ox-hugo/ewins00_base_excitation_modal_setup.png) | +|---------------------------------------------------------|-------------------------------------------------------| +| Conventional modal test setup | Base excitation setup | +| height=4cm | height=4cm | + + +### Spatial models {#spatial-models} + +It would appear from the basic orthogonality properties of the modal model that there exists a simple means of constructing a spatial model from the modal model, but this is not so. +We have that: + +\begin{equation} + \begin{aligned} + [\Phi]^T[M][\Phi] &= [I]\\\\\\ + [\Phi]^T[K][\Phi] &= [\lambda\_r^2] + \end{aligned} +\end{equation} + +from which it would appear that we can write + +\begin{equation} + \begin{aligned} + [M] &= [\Phi]^{-T} [I] [\Phi]^{-1}\\\\\\ + [K] &= [\Phi]^{-T} [\lambda\_r^2] [\Phi]^{-1} + \end{aligned} +\end{equation} + +However, equation [eq:m_k_from_modes](#eq:m_k_from_modes) is **only applicable when we have available the complete \\(N \times N\\) modal model**. + +It is much more usual to have an incomplete model in which the eigenvector matrix is rectangular and, as such, is non-invertible. +One step which can be made using the incomplete data is the construction of "pseudo" flexibility and inverse-mass matrices. +This is accomplished using the above equation in the form: + +\begin{equation} + \begin{aligned} + [K]\_{n\times n}^{-1} &= [\Phi]\_{n\times m} [\lambda\_r^2]\_{m\times m}^{-1} [\Phi]\_{m\times n}^T\\\\\\ + [M]\_{n\times n}^{-1} &= [\Phi]\_{n\times m} [\Phi]\_{m\times n}^T + \end{aligned} +\end{equation} + +Because the rank of each pseudo matrix is less than its order, it cannot be inverted and so we are unable to construct the stiffness or mass matrix from this approach. + +# Bibliography +Ewins, D., *Modal testing: theory, practice and application* (2000), Baldock, Hertfordshire, England Philadelphia, PA: Wiley-Blackwell. 
[↩](#12ff508e9095d666cf081e3c5a6a4cce) diff --git a/content/book/fleming14_desig_model_contr_nanop_system.md b/content/book/fleming14_desig_model_contr_nanop_system.md new file mode 100644 index 0000000..8512631 --- /dev/null +++ b/content/book/fleming14_desig_model_contr_nanop_system.md @@ -0,0 +1,21 @@ ++++ +title = "Design, modeling and control of nanopositioning systems" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + + +Reference +: (Andrew Fleming \& Kam Leang, 2014) + +Author(s) +: Fleming, A. J., & Leang, K. K. + +Year +: 2014 + +# Bibliography +Fleming, A. J., & Leang, K. K., *Design, modeling and control of nanopositioning systems* (2014), : Springer International Publishing. [↩](#1851788e0c4aa5b06afe3362c73ea5eb) diff --git a/content/book/horowitz15_art_of_elect_third_edition.md b/content/book/horowitz15_art_of_elect_third_edition.md new file mode 100644 index 0000000..45a8cc2 --- /dev/null +++ b/content/book/horowitz15_art_of_elect_third_edition.md @@ -0,0 +1,20 @@ ++++ +title = "The art of electronics - third edition" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Reference Books]({{< relref "reference_books" >}}), [Electronics]({{< relref "electronics" >}}) + +Reference +: (Horowitz, 2015) + +Author(s) +: Horowitz, P. + +Year +: 2015 + +# Bibliography +Horowitz, P., *The art of electronics - third edition* (2015), New York, NY, USA: Cambridge University Press. [↩](#b6e665e07855d8348f2f3aa70ec9b563) diff --git a/content/book/leach14_fundam_princ_engin_nanom.md b/content/book/leach14_fundam_princ_engin_nanom.md new file mode 100644 index 0000000..a364dea --- /dev/null +++ b/content/book/leach14_fundam_princ_engin_nanom.md @@ -0,0 +1,93 @@ ++++ +title = "Fundamental principles of engineering nanometrology" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Metrology]({{< relref "metrology" >}}) + +Reference +: (Richard Leach, 2014) + +Author(s) +: Leach, R. 
+ +Year +: 2014 + + +## Measurement of angles {#measurement-of-angles} + +Unit: + +- radian for plane angle +- steradian for solid angle + +\\(1 rad \approx 57.3deg\\) + +Instrument principles: + +- subdivision: index table, angular gratings, polygons, ... +- ratio of two lengths: angular interferometers, sine bars, small angle generators, ... +- autocollimators with a flat mirror + + +### How that thing with two autocollimators can work? {#how-that-thing-with-two-autocollimators-can-work} + + +## Sources of error in displacement interferometry {#sources-of-error-in-displacement-interferometry} + +Two error sources: + +- error sources that are proportional to the displacement being measured \\(L\\): cumulative errors +- error sources that are independent of the displacement being measured: non-cumulative errors + + +### Thermal expansion of the metrology frame {#thermal-expansion-of-the-metrology-frame} + + +### Deadpath length {#deadpath-length} + +Deadpath length, \\(d\\), is defined as the difference in distance in air between the reference and measurement reflectors and the beam splitter when the interferometer measurement is initiated. +Deadpath error occurs when there is a non-zero deadpath and environmental conditions change during a measurement. + + +### Cosine error {#cosine-error} + +\\(\Delta l = l(1-\cos(\theta))\\) + +For small angles: \\(\Delta l = \frac{l \theta^2}{2}\\) + +The cosine error is then a second-order effect, contrary to the Abbe error which is a first order effect. +The second order nature means that the cosine error quickly diminishes as the alignment is improved. 
+ + +## Latest advances in displacement interferometry {#latest-advances-in-displacement-interferometry} + +Commercial interferometers +=> fused silica optics housed in Invar mounts +=> all the optical components are mounted to one central optic to reduce the susceptibility to thermal variations + +One advantage that homodyne systems have over heterodyne systems is their ability to readily have the source fibre delivered to the interferometer. + + +### Spatially separated interferometers {#spatially-separated-interferometers} + +It uses a heterodyne interferometer and one quadrant photodiode. +By knowing the beam size and detector geometry, the measurement target's angle change can be determined by differencing matched pairs of measured phase from the quadrant photodiode while the displacement is determined from the average phase over the four quadrants. + + +## Angular interferometers {#angular-interferometers} + +Determination of an angle by the ratio of two lengths. +The angular optics is used to create two parallel beam paths between the angular interferometer and the angular reflector. + +The beam that illuminates the angular optics contains two frequencies, \\(f\_1\\) and \\(f\_2\\). A polarising beam splitter in the angular interferometer splits the frequencies that travel along separate paths. + +The measurement of angles is then relative. + +This type of angular interferometer is used to measure small angles (less than \\(10deg\\)). + +# Bibliography +Leach, R., *Fundamental principles of engineering nanometrology* (2014), : Elsevier. 
[↩](#58bd6e601168ed1397ab2ec3cc3bab2d) diff --git a/content/book/leach18_basic_precis_engin_edition.md b/content/book/leach18_basic_precis_engin_edition.md new file mode 100644 index 0000000..e83a0b0 --- /dev/null +++ b/content/book/leach18_basic_precis_engin_edition.md @@ -0,0 +1,20 @@ ++++ +title = "Basics of precision engineering - 1st edition" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Precision Engineering]({{< relref "precision_engineering" >}}) + +Reference +: (Richard Leach \& Stuart Smith, 2018) + +Author(s) +: Leach, R., & Smith, S. T. + +Year +: 2018 + +# Bibliography +Leach, R., & Smith, S. T., *Basics of precision engineering - 1st edition* (2018), : CRC Press. [↩](#cc6e42420309d21c1aa596152d84cf8b) diff --git a/content/book/preumont18_vibrat_contr_activ_struc_fourt_edition.md b/content/book/preumont18_vibrat_contr_activ_struc_fourt_edition.md new file mode 100644 index 0000000..7ed63a9 --- /dev/null +++ b/content/book/preumont18_vibrat_contr_activ_struc_fourt_edition.md @@ -0,0 +1,1819 @@ ++++ +title = "Vibration Control of Active Structures - Fourth Edition" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Vibration Isolation]({{< relref "vibration_isolation" >}}), [Reference Books]({{< relref "reference_books" >}}), [Stewart Platforms]({{< relref "stewart_platforms" >}}) + +Reference +: (Andre Preumont, 2018) + +Author(s) +: Preumont, A. + +Year +: 2018 + + +## Introduction {#introduction} + + +### Active Versus Passive {#active-versus-passive} + +Active structure may be cheaper or lighter than passive structures of comparable performances; or they may offer performances that no passive structure could offer. + +Active is not always better, and a **control systems cannot compensate for a bad design**. Active solution should be considered only after all other passive means have been exhausted. 
+ +Feedback control can compensate for external disturbances only in a limited frequency range (the bandwidth), the disturbances are actually amplified by the control system outside this frequency band. + + +### Vibration Suppression {#vibration-suppression} + +Vibration reduction can be achieved in many different ways: + +- **stiffening**: consists of shifting the resonance frequency of the structure beyond the frequency band of excitation +- **damping**: consists of reducing the resonance peaks by dissipating the vibration energy +- **isolation**: consists of preventing the propagation of disturbances to sensitive parts of the system + +The design of an active control system involves many issues such as how to configurate the sensors and actuators, how to secure stability and robustness. The power requirements will often determine the size of the actuators and the cost of the project. + + +### Smart Materials and Structures {#smart-materials-and-structures} + +An active structure consists of a structure provided with a set of actuators and sensors coupled by a controller. If the bandwidth of the controller includes some vibration modes of the structure, its dynamic response must be considered. + +If the set of actuators and sensors are located at discrete points of the structure, they can be treated separately. However, for smart structures, the actuators and sensors are often distributed and have a high degree of integration inside the structure, which makes a separate modelling impossible. + +Some smart materials are: + +- **Shape Memory Alloys** (SMA): recoverable strain of \\(\SI{5}{\percent}\\) induced by temperature. They can be used at low frequency and for low precision applications +- **Piezoelectric materials**: recoverable strain of \\(\SI{0.1}{\percent}\\) under electric field. They can be used as actuators as well as sensors. Two main classes: ceramics and polymers. Piezopolymers are used mostly as sensors as they require high voltage. 
The best-known piezoceramic is the Lead-Zirconate-Titanate (PZT). +- **Magnetostrictive materials**: recoverable strain of \\(\SI{0.15}{\percent}\\) under magnetic field +- **Magneto-Rheological fluids** (MR): consists of viscous fluids containing micron-sized particles of magnetic material. When the fluid is subjected to a magnetic field, the particles create columnar structures requiring a minimum shear stress to initiate the flow. + + +### Control Strategies {#control-strategies} + +There are two radically different approaches to disturbance rejection: feedback and feedforward. + + +#### Feedback {#feedback} + + + +{{< figure src="/ox-hugo/preumont18_classical_feedback_small.png" caption="Figure 1: Principle of feedback control" >}} + +The principle of feedback is represented on figure [fig:classical_feedback_small](#fig:classical_feedback_small). The output \\(y\\) of the system is compared to the reference signal \\(r\\), and the error signal \\(\epsilon = r-y\\) is passed into a compensator \\(K(s)\\) and applied to the system \\(G(s)\\), \\(d\\) is the disturbance. +The design problem consists of finding the appropriate compensator \\(K(s)\\) such that the closed-loop system is stable and behaves in the appropriate manner. + +In the control of lightly damped structures, feedback control is used for two distinct and complementary purposes: **active damping** and **model-based feedback**. + +**Active Damping**: + +- The objective of active damping is to reduce the effect of resonant peaks on the response of the structure. +- From \\(\frac{y}{d} = \frac{1}{1 + GK}\\), this requires \\(GK \gg 1\\) near the resonances +- It can generally be achieved without a model of the structure, with guaranteed stability, provided that the actuator and sensor are **collocated** and have perfect dynamics. + +**Model based feedback**: +The objective is to control a variable \\(y\\) to a desired value \\(r\\) in spite of the external disturbances \\(d\\). 
+ +- From \\(\frac{y}{r} = \frac{GK}{1 + GK}\\) we see that this requires large values of \\(GK\\) in the frequency range where \\(y\approx r\\) (bandwidth) +- The bandwidth \\(\omega\_c\\) is limited by the accuracy of the model +- The disturbance rejection within the bandwidth of the control system is always compensated by an amplification of the disturbances outside the bandwidth +- When implemented digitally, the sampling frequency \\(\omega\_s\\) must always be two orders of magnitude larger than \\(\omega\_c\\) to reasonably preserve the behavior of the continuous system + + +#### Feedforward {#feedforward} + + + +{{< figure src="/ox-hugo/preumont18_feedforward_adaptative.png" caption="Figure 2: Principle of feedforward control" >}} + +The method relies on the availability of a **reference signal correlated to the primary disturbance**. +The idea is to produce a second disturbance such that it cancels the effect of the primary disturbance at the location of the error sensor. Its principle is explained in figure [2](#orga8d6c2f). + +The filter coefficients are adapted in such a way that the error signal at one or several critical points is minimized. + +There is no guarantee that the global response is reduced at other locations. This method is therefore considered as a local one. +Because it is less sensitive to phase lag than feedback, it can be used at higher frequencies (\\(\omega\_c \approx \omega\_s/10\\)). + +The table [1](#table--tab:adv-dis-type-control) summarizes the main features of the two approaches. + +
+ Table 1: + Advantages and Disadvantages of some types of control +
+ +| | **Advantages** | **Disadvantages** | +|------------------------------------|-----------------------------------------------|---------------------------------------------------| +| **Active Damping** | - Simple to implement | - Effective only near resonance | +| | - Does not required accurate model | | +| | - Guaranteed stability (collocated) | | +| **Model Based** | - Global method | - Requires accurate model | +| | - Attenuate all disturbance within bandwidth | - Limited bandwidth | +| | | - Spillover | +| | | - Amplification of disturbances outside bandwidth | +| **Feedforward Adaptive filtering** | - No model is necessary | - Error signal required | +| | - Robust to change in plant transfer function | - Local method: may amplify vibration elsewhere | +| | - More effective for narrowband disturbance | - Large amount of real-time computation | + + +### The Various Steps of the Design {#the-various-steps-of-the-design} + + + +{{< figure src="/ox-hugo/preumont18_design_steps.png" caption="Figure 3: The various steps of the design" >}} + +The various steps of the design of a controlled structure are shown in figure [3](#orgb7c5b7f). + +The **starting point** is: + +- Mechanical system +- Performance objectives +- Specification of the disturbances + +Then the **open loop performances** can be evaluated: + +- The need for active control can be assessed +- The needed bandwidth can be roughly specified + +The next step consist of selecting the proper **type and location of sensors and actuators**: + +- The controllability and Observability are important concepts + +A **model of the structure** is developped: + +- FEM or identification +- Model reduction to limit the DoF + +If the dynamics of the sensors and actuators may significantly affect the behavior of the system, they must be included in the model before the controller design. 
+ + +### Plant Description, Error and Control Budget {#plant-description-error-and-control-budget} + +From the block diagram of the control system (figure [fig:general_plant](#fig:general_plant)): + +\begin{align\*} +y &= (I - G\_{yu}H)^{-1} G\_{yw} w\\\\\\ +z &= T\_{zw} w = [G\_{zw} + G\_{zu}H(I - G\_{yu}H)^{-1} G\_{yw}] w +\end{align\*} + + + +{{< figure src="/ox-hugo/preumont18_general_plant.png" caption="Figure 4: Block diagram of the control System" >}} + +The frequency content of the disturbance \\(w\\) is usually described by its **power spectral density** \\(\Phi\_w (\omega)\\) which describes the frequency distribution of the mean-square value. + +
+
+ +\\[\sigma\_w = \sqrt{\int\_0^\infty \Phi\_w(\omega) d\omega}\\] + +
+ +Even more interesting for the design is the **Cumulative Mean Square** response defined by the integral of the PSD in the frequency range \\([\omega, \infty[\\). + +
+
+ +\\[\sigma\_z^2(\omega) = \int\_\omega^\infty \Phi\_z(\nu) d\nu = \int\_\omega^\infty |T\_{zw}|^2 \Phi\_w(\nu) d\nu \\] + +
+ +It is a monotonously decreasing function of frequency and describes the contribution of all frequencies above \\(\omega\\) to the mean-square value of \\(z\\). +\\(\sigma\_z(0)\\) is then the global RMS response. + +A typical plot of \\(\sigma\_z(\omega)\\) is shown in figure [fig:cas_plot](#fig:cas_plot). +It is useful to **identify the critical modes** in a design, at which the effort should be targeted. + +The diagram can also be used to **assess the control laws** and compare different actuator and sensor configuration. + + + +{{< figure src="/ox-hugo/preumont18_cas_plot.png" caption="Figure 5: Error budget distribution in OL and CL for increasing gains" >}} + + +### Pseudo-inverse {#pseudo-inverse} + + +#### Under-actuated System {#under-actuated-system} + +Consider the linear system of equation: +\\[w = J v\\] +With: + +- \\(w\\) a vector with \\(m\\) components (measurements) +- \\(v\\) a vector with \\(n\\) components (inputs) +- We assume \\(m>n\\) (under-actuated) + +We seek the pseudo-inverse of \\(J\\) such that \\(v = J^+ w\\) + +The columns of \\(J\\) are the influence function of the actuators. +If the columns of \\(J\\) are independent, the Jacobian is full rank (\\(r=n\\)) and the **Moore-Penrose pseudo inverse** is: +\\[J^+ = (J^T J)^{-1} J^T\\] + + +#### Over-actuated System {#over-actuated-system} + +If there are more actuators than sensors (\\(m<n\\)), the minimum norm solution is obtained with the pseudo-inverse: +\\[J^+ = J^T (J J^T)^{-1}\\] +The singular value decomposition below provides a pseudo-inverse valid for both cases \\(m>n\\) and \\(n>m\\). + + +#### Singular Value Decomposition {#singular-value-decomposition} + +The **Singular Value Decomposition** (SVD) is a generalization of the eigenvalue decomposition of a rectangular matrix: +\\[ J = U \Sigma V^T = \sum\_{i=1}^r \sigma\_i u\_i v\_i^T \\] +With: + +- \\(U\\) and \\(V\\) orthogonal matrices. 
The columns \\(u\_i\\) and \\(v\_i\\) of \\(U\\) and \\(V\\) are the eigenvectors of the square matrices \\(JJ^T\\) and \\(J^TJ\\) respectively +- \\(\Sigma\\) a rectangular diagonal matrix of dimension \\(m \times n\\) containing the square root of the common non-zero eigenvalues of \\(JJ^T\\) and \\(J^TJ\\) +- \\(r\\) is the number of non-zero singular values of \\(J\\) + +The pseudo-inverse of \\(J\\) is: +\\[ J^+ = V\Sigma^+U^T = \sum\_{i=1}^r \frac{1}{\sigma\_i} v\_i u\_i^T \\] + +The conditioning of the Jacobian is measured by the **condition number**: +\\[ c(J) = \frac{\sigma\_{max}}{\sigma\_{min}} \\] + +When \\(c(J)\\) becomes large, the most straightforward way to handle the ill-conditioning is to truncate the smallest singular value out of the sum. +This will have usually little impact of the fitting error while reducing considerably the actuator inputs \\(v\\). + + +## Some Concepts in Structural Dynamics {#some-concepts-in-structural-dynamics} + + +### Equation of Motion of a Discrete System {#equation-of-motion-of-a-discrete-system} + +The general form of the equation of motion governing the dynamic equilibrium between the external, elastic, inertia and damping forces acting on a discrete, flexible structure with a finite number \\(n\\) of degrees of freedom is + +
+
+ +\begin{equation} + M \ddot{x} + C \dot{x} + K x = f +\end{equation} + +With: + +- \\(x\\) is the vector of generalized displacements (translations and rotations) +- \\(f\\) is the vector of generalized forces (point forces and torques) +- \\(M\\), \\(C\\) and \\(K\\) are respectively the mass, damping and stiffness matrices; they are symmetric and semi-positive definite + +
+ +The damping matrix \\(C\\) represents the various dissipation mechanisms in the structure, which are usually poorly known. One of the popular hypotheses is the Rayleigh damping. + +
+
+ +\begin{equation} + C = \alpha M + \beta K +\end{equation} + +
+ +\\(\alpha\\) and \\(\beta\\) are selected to fit the structure under consideration. + + +### Vibration Modes {#vibration-modes} + +Consider the free response of an undamped system of order \\(n\\): +\\[ M\ddot{x} + K x = 0 \\] + +If one tries a solution of the form \\(x = \phi\_i e^{j\omega\_i t}\\), \\(\phi\_i\\) and \\(\omega\_i\\) must statisfy the eigenvalue problem +\\[ (K - \omega\_i^2 M)\phi\_i = 0 \\] +with: + +- \\(\omega\_i\\): the **natural frequency** +- \\(\phi\_i\\): the corresponding **mode shape** + +The number of mode shapes is equal to the number of degrees of freedom \\(n\\). + +The mode shapes are orthogonal with respect to the stiffness and mass matrices: + +\begin{align} + \phi\_i^T M \phi\_j &= \mu\_i \delta\_{ij} \\\\\\ + \phi\_i^T K \phi\_j &= \mu\_i \omega\_i^2 \delta\_{ij} +\end{align} + +With \\(\mu\_i\\) the **modal mass** (also called the generalized mass) of mode \\(i\\). + + +### Modal Decomposition {#modal-decomposition} + + +#### Structure Without Rigid Body Modes {#structure-without-rigid-body-modes} + +Let perform a change of variable from physical coordinates \\(x\\) to modal coordinates \\(z\\). + +
+
+ +\begin{equation} + x = \Phi z +\end{equation} + +With: + +- \\(\Phi = [\phi\_1, \phi\_2, ..., \phi\_n]\\) the matrix of the mode shapes +- \\(z\\) the vector of modal amplitudes + +
+ +The dynamic equation of the system becomes: +\\[ M \Phi \ddot{z} + C \Phi \dot{z} + K \Phi z = f \\] + +If we left multiply the equation by \\(\Phi^T\\) and we use the orthogonality relationships: +\\[ diag(\mu\_i) \ddot{z} + \Phi^T C \Phi \dot{z} + diag(\mu\_i \omega\_i^2) z = \Phi^T f \\] + +If \\(\Phi^T C \Phi\\) is diagonal, the **damping is said to be classical or normal**. In this case: +\\[ \Phi^T C \Phi = diag(2 \xi\_i \mu\_i \omega\_i) \\] + +One can verify that the Rayleigh damping [eq:rayleigh_damping](#eq:rayleigh_damping) complies with this condition with modal damping ratios \\(\xi\_i = \frac{1}{2} ( \frac{\alpha}{\omega\_i} + \beta\omega\_i )\\). + +And we obtain decoupled modal equations [eq:modal_eom](#eq:modal_eom). + +
+
+ +\begin{equation} + \ddot{z} + 2 \xi \Omega \dot{z} + \Omega^2 z = \mu^{-1} \Phi^T f +\end{equation} + +with: + +- \\(\xi = diag(\xi\_i)\\) +- \\(\Omega = diag(\omega\_i)\\) +- \\(\mu = diag(\mu\_i)\\) + +
+ +Typical values of the modal damping ratio are summarized on table [tab:damping_ratio](#tab:damping_ratio). + + +
+ Table 2: + Typical Damping ratio +
+ +| **Damping Ratio** | **Application** | +|--------------------------------|-------------------------| +| \\(\xi \simeq 0.001 - 0.005\\) | Space structures | +| \\(\xi \simeq 0.01 - 0.02\\) | Mechanical engineering | +| \\(\xi \simeq 0.05\\) | Civil engineering | +| \\(\xi \simeq 0.2\\) | When ground is involved | + +The assumption of classical damping is often justified for light damping, but it is questionable when the damping is large. + +If one accepts the assumption of classical damping, the only difference between equation [eq:general_eom](#eq:general_eom) and [eq:modal_eom](#eq:modal_eom) lies in the change of coordinates. +However, in physical coordinates, the number of degrees of freedom is usually very large. +If a structure is excited by a band-limited excitation, its response is dominated by the modes whose natural frequencies are inside the bandwidth of the excitation and the equation [eq:modal_eom](#eq:modal_eom) can often be restricted to these modes. +Therefore, the number of degrees of freedom contributing effectively to the response is **reduced drastically** in modal coordinates. + + +#### Dynamic Flexibility Matrix {#dynamic-flexibility-matrix} + +If we consider the steady-state response of equation [eq:general_eom](#eq:general_eom) to harmonic excitation \\(f=F e^{j\omega t}\\), the response is also harmonic \\(x = Xe^{j\omega t}\\). 
The amplitudes \\(F\\) and \\(X\\) are related by: +\\[ X = G(\omega) F \\] + +Where \\(G(\omega)\\) is called the **Dynamic flexibility Matrix**: +\\[ G(\omega) = (-\omega^2 M + j\omega C + K)^{-1} \\] + +The modal expansion of the dynamic flexibility matrix can be obtained by the coordinate transformation \\(x = \Phi z\\): + +\begin{equation} + G(\omega) = \sum\_{i=1}^n \frac{\phi\_i \phi\_i^T}{\mu\_i \omega\_i^2} D\_i(\omega) +\end{equation} + +With: + +- \\(D\_i(\omega)\\) is the dynamic amplification factor of mode \\(i\\) given by + +\begin{equation} + D\_i(\omega) = \frac{1}{1 - \omega^2/\omega\_i^2 + 2 j \xi\_i \omega/\omega\_i} +\end{equation} + + + +{{< figure src="/ox-hugo/preumont18_neglected_modes.png" caption="Figure 6: Fourier spectrum of the excitation \\(F\\) and dynamic amplification \\(D\_i\\) of mode \\(i\\) and \\(k\\) such that \\(\omega\_i < \omega\_b\\) and \\(\omega\_k \gg \omega\_b\\)" >}} + +If the excitation has a limited bandwidth \\(\omega\_b\\), the contribution of the high frequency modes \\(\omega\_k \gg \omega\_b\\) can be evaluated by assuming \\(D\_k(\omega) \approx 1\\) (as shown on figure [fig:neglected_modes](#fig:neglected_modes)). + +And \\(G(\omega)\\) can be rewritten in terms of the **low frequency modes only**: +\\[ G(\omega) \approx \sum\_{i=1}^m \frac{\phi\_i \phi\_i^T}{\mu\_i \omega\_i^2} D\_i(\omega) + R \\] + +The quasi-static correction of the high frequency modes \\(R\\) is called the **residual mode**. This introduces a **feedthrough** component in the transfer matrix. + + +#### Structure with Rigid Body Modes {#structure-with-rigid-body-modes} + + +### Collocated Control System {#collocated-control-system} + +
+
+ +A **collocated control system** is a control system where: + +- the actuator and the sensor are **attached to the same degree of freedom** +- they are **dual**: the product of the actuator signal and the sensor signal represents the energy exchange between the structure and the control system + +
+ + +
+ Table 3: + Examples of dual actuators and sensors +
+ +| **Actuator** | **Sensor** | +|--------------|-------------| +| Force | Translation | +| Torque | Rotation | + +The open-loop FRF of a collocated system corresponds to a diagonal component of the dynamic flexibility matrix. + +If we assumes that the collocated system is undamped and is attached to the DoF \\(k\\), the open-loop FRF is purely real: +\\[ G\_{kk}(\omega) = \sum\_{i=1}^m \frac{\phi\_i^2(k)}{\mu\_i (\omega\_i^2 - \omega^2)} + R\_{kk} \\] + +\\(G\_{kk}\\) is a monotonously increasing function of \\(\omega\\) (figure [fig:collocated_control_frf](#fig:collocated_control_frf)). + + + +{{< figure src="/ox-hugo/preumont18_collocated_control_frf.png" caption="Figure 7: Open-Loop FRF of an undamped structure with collocated actuator/sensor pair" >}} + +The amplitude of the FRF goes from \\(-\infty\\) at the resonance frequencies \\(\omega\_i\\) to \\(+\infty\\) at the next resonance frequency \\(\omega\_{i+1}\\). Therefore, in every interval, there is a frequency \\(z\_i\\) such that \\(\omega\_i < z\_i < \omega\_{i+1}\\) where the amplitude of the FRF vanishes. The frequencies \\(z\_i\\) are called **anti-resonances**. + +
+
+ +Undamped **collocated control systems** have **alternating poles and zeros** on the imaginary axis. +For lightly damped structure, the poles and zeros are just moved a little bit in the left-half plane, but they are still interlacing. + +
+ +If the undamped structure is excited harmonically by the actuator at the frequency of the transmission zero \\(z\_i\\), the amplitude of the response of the collocated sensor vanishes. That means that the structure oscillates at the frequency \\(z\_i\\) according to the mode shape shown in dotted line figure [fig:collocated_zero](#fig:collocated_zero). + + + +{{< figure src="/ox-hugo/preumont18_collocated_zero.png" caption="Figure 8: Structure with collocated actuator and sensor" >}} + +
+
+ +The frequency of the transmission zero \\(z\_i\\) and the associated mode shape are the **natural frequency** and the **mode shape** of the system obtained by **constraining the d.o.f. on which the control system acts**. + +The open-loop zeros are asymptotic values of the closed-loop poles when the feedback gain goes to infinity. + +The open-loop poles are independent of the actuator and sensor configuration while the open-loop zeros do depend on it. + +
+
+By looking at figure [fig:collocated_control_frf](#fig:collocated_control_frf), we see that neglecting the residual mode in the modelling amounts to translating the FRF diagram vertically. That produces a shift in the location of the transmission zeros to the right.
+
+
+
+{{< figure src="/ox-hugo/preumont18_alternating_p_z.png" caption="Figure 9: Bode plot of a lightly damped structure with collocated actuator and sensor" >}}
+
+The open-loop transfer function of a lightly damped structure with a collocated actuator/sensor pair can be written:
+
+\begin{equation}
+ G(s) = G\_0 \frac{\Pi\_i(s^2/z\_i^2 + 2 \xi\_i s/z\_i + 1)}{\Pi\_j(s^2/\omega\_j^2 + 2 \xi\_j s /\omega\_j + 1)}
+\end{equation}
+
+The corresponding Bode plot is represented in figure [9](#orgadf3ccb). Every imaginary pole at \\(\pm j\omega\_i\\) introduces a \\(\SI{180}{\degree}\\) phase lag and every imaginary zero at \\(\pm jz\_i\\) introduces a phase lead of \\(\SI{180}{\degree}\\).
+In this way, the phase diagram is always contained between \\(\SI{0}{\degree}\\) and \\(\SI{-180}{\degree}\\) as a consequence of the interlacing property.
+
+
+## Electromagnetic and Piezoelectric Transducers {#electromagnetic-and-piezoelectric-transducers}
+
+
+### Introduction {#introduction}
+
+Transducers are critical in active structures technology.
+In many applications, the actuators are the most critical part of the system; however, the sensors become very important in precision engineering where submicron amplitudes must be detected.
+
+Two broad categories of actuators can be distinguished:
+
+- **grounded actuator**: react on a fixed support. They include torque motors, force motors (shakers), tendons
+- **structure borne actuator**: includes jets, reaction wheels, proof-mass actuators, piezo strips, ...
+
+
+### Voice Coil Transducer {#voice-coil-transducer}
+
+A voice coil transducer is an energy transformer which converts electrical power into mechanical power and vice versa.
+
+The system consists of (see figure [fig:voice_coil_schematic](#fig:voice_coil_schematic)):
+
+- A permanent magnet which produces a uniform flux density \\(B\\) normal to the gap
+- A coil which is free to move axially
+
+
+
+{{< figure src="/ox-hugo/preumont18_voice_coil_schematic.png" caption="Figure 10: Physical principle of a voice coil transducer" >}}
+
+We note:
+
+- \\(v\\) the velocity of the coil
+- \\(f\\) the external force acting to maintain the coil in equilibrium against the electromagnetic forces
+- \\(e\\) the voltage difference across the coil
+- \\(i\\) the current into the coil
+
+
+
+
+**Faraday's law**:
+
+\begin{equation}
+ e = 2\pi n r B v = T v
+\end{equation}
+
+where \\(T = 2\pi n r B\\) is the **transducer constant**.
+
+**Lorentz force law**:
+
+\begin{equation}
+ f = -i 2\pi n r B = - T i
+\end{equation}
+
+
+
+The total power delivered to the moving coil transducer is equal to the sum of the electric power and the mechanical power:
+\\[ ei + fv = 0 \\]
+
+Thus, at any time, there is an equilibrium between the electrical power absorbed by the device and the mechanical power delivered.
+
+
+#### Proof-Mass Actuator {#proof-mass-actuator}
+
+A reaction mass \\(m\\) is connected to the support structure by a spring \\(k\\), a damper \\(c\\) and a force actuator \\(f = T i\\) (figure [fig:proof_mass_actuator](#fig:proof_mass_actuator)).
+
+
+
+{{< figure src="/ox-hugo/preumont18_proof_mass_actuator.png" caption="Figure 11: Proof-mass actuator" >}}
+
+If we apply the second law of Newton on the mass:
+\\[ m\ddot{x} + c\dot{x} + kx = f = Ti \\]
+
+In the Laplace domain:
+\\[ x = \frac{Ti}{ms^2 + cs + k} \\]
+
+The total force applied on the support is:
+\\[ F = -f + (cs + k) x = -m s^2 x = \frac{-ms^2Ti}{ms^2 + cs + k} \\]
+
+The transfer function between the total force and the current \\(i\\) applied to the coil is:
+
+
+
+\begin{equation}
+ \frac{F}{i} = \frac{-s^2 T}{s^2 + 2\xi\_p \omega\_p s + \omega\_p^2}
+\end{equation}
+
+with:
+
+- \\(T\\) is the transducer constant
+- \\(\omega\_p = \sqrt{\frac{k}{m}}\\) is the natural frequency of the spring-mass system
+- \\(\xi\_p\\) is the damping ratio
+
+
+
+Above some critical frequency \\(\omega\_c \approx 2\omega\_p\\), **the proof-mass actuator can be regarded as an ideal force generator** (figure [fig:proof_mass_tf](#fig:proof_mass_tf)).
+
+
+
+{{< figure src="/ox-hugo/preumont18_proof_mass_tf.png" caption="Figure 12: Bode plot \\(F/i\\) of the proof-mass actuator" >}}
+
+
+#### Geophone {#geophone}
+
+The geophone is a transducer which behaves like an **absolute velocity sensor** above some cutoff frequency.
+The voltage \\(e\\) of the coil is used as the sensor output.
+
+If \\(x\_0\\) is the displacement of the support and if the voice coil is open (\\(i=0\\)), the governing equations are:
+
+\begin{align\*}
+ m\ddot{x} + c(\dot{x}-\dot{x\_0}) + k(x-x\_0) &= 0\\\\\\
+ T(\dot{x}-\dot{x\_0}) &= e
+\end{align\*}
+
+By using the two equations, we obtain:
+
+\begin{equation}
+ \frac{e}{\dot{x\_0}} = \frac{-s^2 T}{s^2 + 2\xi\_p\omega\_p s + \omega\_p^2}
+\end{equation}
+
+Above the corner frequency, the gain of the geophone is equal to the transducer constant \\(T\\).
+
+
+
+{{< figure src="/ox-hugo/preumont18_geophone.png" caption="Figure 13: Model of a geophone based on a voice coil transducer" >}}
+
+Designing geophones with very low corner frequency is in general difficult. Active geophones where the frequency is lowered electronically may constitute a good alternative option.
+
+
+### General Electromechanical Transducer {#general-electromechanical-transducer}
+
+The constitutive behavior of a wide class of electromechanical transducers can be modelled as in figure [fig:electro_mechanical_transducer](#fig:electro_mechanical_transducer).
+
+
+
+{{< figure src="/ox-hugo/preumont18_electro_mechanical_transducer.png" caption="Figure 14: Electrical analog representation of an electromechanical transducer" >}}
+
+In Laplace form the constitutive equations read:
+
+\begin{align}
+ e & = Z\_e i + T\_{em} v \label{eq:gen\_trans\_e} \\\\\\
+ f & = T\_{me} i + Z\_m v \label{eq:gen\_trans\_f}
+\end{align}
+
+With:
+
+- \\(e\\) is the Laplace transform of the input voltage across the electrical terminals
+- \\(i\\) is the input current
+- \\(f\\) is the force applied to the mechanical terminals
+- \\(v\\) is the velocity of the mechanical part
+- \\(Z\_e\\) is the blocked electrical impedance (for \\(v=0\\))
+- \\(T\_{em}\\) is the transduction coefficient representing the electromotive force (in \\(\si{\volt\second\per\meter}\\))
+- \\(T\_{me}\\) is the transduction coefficient representing the force acting on the mechanical terminals to balance the electromagnetic force induced per unit current input (in \\(\si{\newton\per\ampere}\\))
+- \\(Z\_m\\) is the mechanical impedance measured when \\(i=0\\)
+
+Equation [eq:gen_trans_e](#eq:gen_trans_e) shows that the voltage across the electrical terminals of any electromechanical transducer is the sum of a contribution proportional to the current applied and a contribution proportional to the velocity of the mechanical terminals.
+Thus, if \\(Z\_e i\\) can be measured and subtracted from \\(e\\), a signal proportional to the velocity is obtained.
+
+To do so, the bridge circuit as shown on figure [fig:bridge_circuit](#fig:bridge_circuit) can be used.
+
+We can show that
+
+\begin{equation}
+ V\_4 - V\_2 = \frac{-Z\_b T\_{em}}{Z\_e + Z\_b} v
+\end{equation}
+
+which is indeed a linear function of the velocity \\(v\\) at the mechanical terminals.
+ + + +{{< figure src="/ox-hugo/preumont18_bridge_circuit.png" caption="Figure 15: Bridge circuit for self-sensing actuation" >}} + + +### Smart Materials {#smart-materials} + +Smart materials have the ability to respond significantly to stimuli of different physical nature. +Figure [fig:smart_materials](#fig:smart_materials) lists various effects that are observed in materials in response to various inputs. + + + +{{< figure src="/ox-hugo/preumont18_smart_materials.png" caption="Figure 16: Stimulus response relations indicating various effects in materials. The smart materials corresponds to the non-diagonal cells" >}} + + +### Piezoelectric Transducer {#piezoelectric-transducer} + +Piezoelectric materials exhibits two effects described below. + +
+
+ +Ability to generate an electrical charge in proportion to an external applied force. + +
+ +
+
+
+An electric field parallel to the direction of polarization induces an expansion of the material.
+
+
+ +The most popular piezoelectric materials are Lead-Zirconate-Titanate (PZT) which is a ceramic, and Polyvinylidene fluoride (PVDF) which is a polymer. + +We here consider a transducer made of one-dimensional piezoelectric material. + +
+
+
+\begin{subequations}
+ \begin{align}
+ D & = \epsilon^T E + d\_{33} T\\\\\\
+ S & = d\_{33} E + s^E T
+ \end{align}
+\end{subequations}
+
+With:
+
+- \\(D\\) is the electric displacement \\([C/m^2]\\)
+- \\(E\\) is the electric field \\([V/m]\\)
+- \\(T\\) is the stress \\([N/m^2]\\)
+- \\(S\\) is the strain
+- \\(\epsilon^T\\) is the dielectric constant under constant stress
+- \\(s^E\\) is the compliance when the electric field is constant (inverse of Young modulus)
+- \\(d\_{33}\\) is the piezoelectric constant \\([m/V]\\) or \\([C/N]\\) in the poling direction of the material (convention)
+
+
+
+
+#### Constitutive Relations of a Discrete Transducer {#constitutive-relations-of-a-discrete-transducer}
+
+The set of equations [eq:piezo_eq](#eq:piezo_eq) can be written in a matrix form:
+
+\begin{equation}
+\begin{bmatrix}D\\S\end{bmatrix}
+=
+\begin{bmatrix}
+\epsilon^T & d\_{33}\\\\\\
+d\_{33} & s^E
+\end{bmatrix}
+\begin{bmatrix}E\\T\end{bmatrix}
+\end{equation}
+
+Where \\((E, T)\\) are the independent variables and \\((D, S)\\) are the dependent variables.
+
+If \\((E, S)\\) are taken as independent variables:
+
+\begin{equation}
+\begin{bmatrix}D\\T\end{bmatrix}
+=
+\begin{bmatrix}
+\epsilon^T(1-k^2) & e\_{33}\\\\\\
+-e\_{33} & c^E
+\end{bmatrix}
+\begin{bmatrix}E\\S\end{bmatrix}
+\end{equation}
+
+With:
+
+- \\(c^E = \frac{1}{s^E}\\) is the Young modulus under short circuited electrodes (\\(E = 0\\)) in \\([N/m^2]\\)
+- \\(e\_{33} = \frac{d\_{33}}{s^E}\\) is the constant relating the electric displacement to the strain for short-circuited electrodes \\([C/m^2]\\)
+
+
+
+ +\begin{equation} + k^2 = \frac{{d\_{33}}^2}{s^E \epsilon^T} = \frac{{e\_{33}}^2}{c^E \epsilon^T} +\end{equation} + +\\(k\\) is called the **electromechanical coupling factor** of the material. +It measures the efficiency of the conversion of the mechanical energy into electrical energy, and vice versa. + +
+ +If one assumes that all the electrical and mechanical quantities are uniformly distributed in a linear transducer formed by a **stack** (see figure [fig:piezo_stack](#fig:piezo_stack)) of \\(n\\) disks of thickness \\(t\\) and cross section \\(A\\), the global constitutive equations of the transducer are obtained by integrating [eq:piezo_eq_matrix_bis](#eq:piezo_eq_matrix_bis) over the volume of the transducer: + +\begin{equation} +\begin{bmatrix}Q\\\Delta\end{bmatrix} += +\begin{bmatrix} +C & nd\_{33}\\\\\\ +nd\_{33} & 1/K\_a +\end{bmatrix} +\begin{bmatrix}V\\f\end{bmatrix} +\end{equation} + +where + +- \\(Q = n A D\\) is the total electric charge on the electrodes of the transducer +- \\(\Delta = S l\\) is the total extension (\\(l = nt\\) is the length of the transducer) +- \\(f = AT\\) is the total force +- \\(V\\) is the voltage applied between the electrodes of the transducer +- \\(C = \epsilon^T A n^2/l\\) is the capacitance of the transducer with no external load (\\(f = 0\\)) +- \\(K\_a = A/s^El\\) is the stiffness with short-circuited electrodes (\\(V = 0\\)) + + + +{{< figure src="/ox-hugo/preumont18_piezo_stack.png" caption="Figure 17: Piezoelectric linear transducer" >}} + +Equation [eq:piezo_stack_eq](#eq:piezo_stack_eq) can be inverted to obtain + +\begin{equation} +\begin{bmatrix}V\\f\end{bmatrix} += +\frac{K\_a}{C(1-k^2)} +\begin{bmatrix} +1/K\_a & -nd\_{33}\\\\\\ +-nd\_{33} & C +\end{bmatrix} +\begin{bmatrix}Q\\\Delta\end{bmatrix} +\end{equation} + + +#### Energy Stored in the Piezoelectric Transducer {#energy-stored-in-the-piezoelectric-transducer} + +Let us write the total stored electromechanical energy of a discrete piezoelectric transducer as shown on figure [fig:piezo_discrete](#fig:piezo_discrete). + +The total power delivered to the transducer is the sum of electric power \\(V i\\) and the mechanical power \\(f \dot{\Delta}\\). 
The net work of the transducer is
+
+\begin{equation}
+ dW = V i dt + f \dot{\Delta} dt = V dQ + f d\Delta
+\end{equation}
+
+
+
+{{< figure src="/ox-hugo/preumont18_piezo_discrete.png" caption="Figure 18: Discrete Piezoelectric Transducer" >}}
+
+By integrating equation [eq:piezo_work](#eq:piezo_work) and using the constitutive equations [eq:piezo_stack_eq_inv](#eq:piezo_stack_eq_inv), we obtain the analytical expression of the stored electromechanical energy for the discrete transducer:
+
+\begin{equation}
+ W\_e(\Delta, Q) = \frac{Q^2}{2 C (1 - k^2)} - \frac{n d\_{33} K\_a}{C(1-k^2)} Q\Delta + \frac{K\_a}{1-k^2}\frac{\Delta^2}{2}
+\end{equation}
+
+- The first term is the electrical energy stored in the capacitance \\(C(1-k^2)\\) (corresponding to fixed geometry \\(\Delta = 0\\))
+- The second term is the piezoelectric energy
+- The third term is the elastic strain energy stored in a spring stiffness \\(K\_a/(1-k^2)\\) (corresponding to open electrodes \\(Q=0\\))
+
+The constitutive equations can be recovered by differentiating the stored energy:
+\\[ f = \frac{\partial W\_e}{\partial \Delta}, \quad V = \frac{\partial W\_e}{\partial Q} \\]
+
+
+#### Interpretation of \\(k^2\\) {#interpretation-of--k-2}
+
+Consider a piezoelectric transducer subjected to the following mechanical cycle: first, it is loaded with a force \\(F\\) with short-circuited electrodes; the resulting extension is \\(\Delta\_1 = F/K\_a\\) where \\(K\_a = A/(s^El)\\) is the stiffness with short-circuited electrodes.
+The energy stored in the system is:
+\\[ W\_1 = \int\_0^{\Delta\_1} f dx = \int\_0^{\Delta\_1} K\_a x dx = \frac{F^2}{2 K\_a} \\]
+
+At this point, the electrodes are open and the transducer is unloaded according to a path of slope \\(K\_a/(1-k^2)\\), the resulting extension is \\(\Delta\_2 = \frac{F(1-k^2)}{K\_a}\\).
+The energy recovered is
+\\[ W\_2 = \int\_0^{\Delta\_2} f dx = \frac{F \Delta\_2}{2} = \frac{F^2(1-k^2)}{2 K\_a} \\]
+
+The ratio between the remaining stored energy and the initial stored energy is
+\\[ \frac{W\_1 - W\_2}{W\_1} = k^2 \\]
+
+
+#### Admittance of the Piezoelectric Transducer {#admittance-of-the-piezoelectric-transducer}
+
+Consider the system of figure [fig:piezo_stack_admittance](#fig:piezo_stack_admittance), where the piezoelectric transducer is assumed massless and is connected to a mass \\(M\\).
+The force acting on the mass is the negative of that acting on the transducer, \\(f = -M \ddot{x}\\).
+
+
+
+{{< figure src="/ox-hugo/preumont18_piezo_stack_admittance.png" caption="Figure 19: Elementary dynamical model of the piezoelectric transducer" >}}
+
+From the constitutive equations, one finds
+
+\begin{equation}
+ \frac{I}{V} = s C (1-k^2) \frac{s^2 + z^2}{s^2 + p^2}
+\end{equation}
+
+where the poles and zeros are respectively
+\\[ p^2 = \frac{K\_a}{M},\quad z^2 = \frac{K\_a/(1-k^2)}{M} \\]
+
+And one can see that
+
+\begin{equation}
+ \frac{z^2 - p^2}{z^2} = k^2
+\end{equation}
+
+Equation [eq:distance_p_z](#eq:distance_p_z) constitutes a practical way to determine the electromechanical coupling factor from the poles and zeros of the admittance measurement (figure [fig:piezo_admittance_curve](#fig:piezo_admittance_curve)).
+ + + +{{< figure src="/ox-hugo/preumont18_piezo_admittance_curve.png" caption="Figure 20: Typical admittance FRF of the transducer" >}} + + +## Piezoelectric Beam, Plate and Truss {#piezoelectric-beam-plate-and-truss} + + +### Piezoelectric Material {#piezoelectric-material} + + +#### Constitutive Relations {#constitutive-relations} + + +#### Coenergy Density Function {#coenergy-density-function} + + +### Hamilton's Principle {#hamilton-s-principle} + + +### Piezoelectric Beam Actuator {#piezoelectric-beam-actuator} + + +#### Hamilton's Principle {#hamilton-s-principle} + + +#### Piezoelectric Loads {#piezoelectric-loads} + + +### Laminar Sensor {#laminar-sensor} + + +#### Current and Charge Amplifiers {#current-and-charge-amplifiers} + + +#### Distributed Sensor Output {#distributed-sensor-output} + + +#### Charge Amplifier Dynamics {#charge-amplifier-dynamics} + + +### Spatial Modal Filters {#spatial-modal-filters} + + +#### Modal Actuator {#modal-actuator} + + +#### Modal Sensor {#modal-sensor} + + +### Active Beam with Collocated Actuator/Sensor {#active-beam-with-collocated-actuator-sensor} + + +#### Frequency Response Function {#frequency-response-function} + + +#### Pole-Zero Pattern {#pole-zero-pattern} + + +#### Modal Truncation {#modal-truncation} + + +### Admittance of a Beam with a Piezoelectric Patch {#admittance-of-a-beam-with-a-piezoelectric-patch} + + +### Piezoelectric Laminate {#piezoelectric-laminate} + + +#### Two-Dimensional Constitutive Equations {#two-dimensional-constitutive-equations} + + +#### Kirchhoff Theory {#kirchhoff-theory} + + +#### Stiffness Matrix of a Multilayer Elastic Laminate {#stiffness-matrix-of-a-multilayer-elastic-laminate} + + +#### Multilayer Laminate with a Piezoelectric Layer {#multilayer-laminate-with-a-piezoelectric-layer} + + +#### Equivalent Piezoelectric Loads {#equivalent-piezoelectric-loads} + + +#### Sensor Output {#sensor-output} + + +#### Beam Model Versus Plate Model {#beam-model-versus-plate-model} + + 
+#### Additional Remarks {#additional-remarks} + + +### Active Truss {#active-truss} + + +#### Open-Loop Transfer Function {#open-loop-transfer-function} + + +#### Admittance Function {#admittance-function} + + +### Finite Element Formulation {#finite-element-formulation} + + +### Problems {#problems} + + +### References {#references} + + +## Passive Damping with Piezoelectric Transducers {#passive-damping-with-piezoelectric-transducers} + + +### Introduction {#introduction} + + +### Resistive Shunting {#resistive-shunting} + + +### Inductive Shunting {#inductive-shunting} + + +#### Equal Peak Design {#equal-peak-design} + + +#### Robustness of the Equal Peak Design {#robustness-of-the-equal-peak-design} + + +### Switched Shunt {#switched-shunt} + + +#### Equivalent Damping Ratio {#equivalent-damping-ratio} + + +## Collocated Versus Non-collocated Control {#collocated-versus-non-collocated-control} + + +### Pole-Zero Flipping {#pole-zero-flipping} + +
+
+ +The Root Locus shows, in a graphical form, the evolution of the poles of the closed-loop system as a function of the scalar gain \\(g\\) applied to the compensator. +The Root Locus is the locus of the solution \\(s\\) of the closed loop characteristic equation \\(1 + gG(s)H(s) = 0\\) when \\(g\\) goes from zero to infinity. + +
+
+If the open-loop transfer function is written
+\\[ G(s)H(s) = k \frac{\Pi\_{i=1}^{m} (s - z\_i)}{\Pi\_{i=1}^{n} (s - p\_i)} \\]
+The locus goes from the poles \\(p\_i\\) (for \\(g=0\\)) to the zeros \\(z\_i\\) (as \\(g \rightarrow \infty\\)).
+
+
+### The Two-Mass Problem {#the-two-mass-problem}
+
+
+#### Collocated Control {#collocated-control}
+
+
+#### Non-collocated Control {#non-collocated-control}
+
+
+### Notch Filter {#notch-filter}
+
+
+### Effect of Pole-Zero Flipping on the Bode Plots {#effect-of-pole-zero-flipping-on-the-bode-plots}
+
+
+### Nearly Collocated Control System {#nearly-collocated-control-system}
+
+
+### Non-collocated Control Systems {#non-collocated-control-systems}
+
+
+### The Role of Damping {#the-role-of-damping}
+
+
+## Active Damping with Collocated System {#active-damping-with-collocated-system}
+
+
+### Introduction {#introduction}
+
+The role of active damping is to increase the negative real parts of system poles while maintaining the natural frequencies essentially unchanged.
+
+Active damping requires relatively little control effort; this is why it is also called Low Authority Control (LAC).
+Other control strategies which fully relocate the closed loop poles are called High Authority Control (HAC).
+
+
+### Lead Control {#lead-control}
+
+\\[H(s) = g \frac{s+z}{s+p} \quad p \gg z \\]
+
+It produces a phase lead in the frequency band between \\(z\\) and \\(p\\), bringing active damping to all the modes belonging to \\(z < \omega\_i < p\\).
+
+The closed-loop poles start at the open-loop poles for \\(g=0\\) and go to the open-loop zeros for \\(g\rightarrow\infty\\).
+
+The controller does not have any roll-off, but the roll-off of the structure is enough to guarantee gain stability at high frequency.
+
+
+### Direct Velocity Feedback (DVF) {#direct-velocity-feedback--dvf}
+
+This is a particular case of the Lead controller as \\(z\rightarrow 0\\) and \\(p\rightarrow\infty\\).
+ +Structure: +\\[M \ddot{x} + K x = b u\\] + +Output is a velocity sensor: +\\[y = b^T \dot{x}\\] + +Control: +\\[u = -g y\\] + + +### Positive Position Feedback (PPF) {#positive-position-feedback--ppf} + +Sometimes the plant does not have a roll-off of \\(-40dB/\text{decade}\\), then we can use a second-order PPF: +\\[H(s) = \frac{-g}{s^2 + 2 \xi\_f \omega\_f s + {\omega\_f}^2}\\] + + +### Integral Force Feedback (IFF) {#integral-force-feedback--iff} + + +### Duality Between the Lead and the IFF Controllers {#duality-between-the-lead-and-the-iff-controllers} + + +#### Root Locus of a Single Mode {#root-locus-of-a-single-mode} + + +#### Open-Loop Poles and Zeros {#open-loop-poles-and-zeros} + + +### Actuator and Sensor Dynamics {#actuator-and-sensor-dynamics} + + +### Decentralized Control with Collocated Pairs {#decentralized-control-with-collocated-pairs} + + +#### Cross talk {#cross-talk} + + +#### Force Actuator and Displacement Sensor {#force-actuator-and-displacement-sensor} + + +#### Displacement Actuator and Force Sensor {#displacement-actuator-and-force-sensor} + + +### Proof of Equation (7.18)–(7.32) {#proof-of-equation--7-dot-18----7-dot-32} + + +## Vibration Isolation {#vibration-isolation} + + +### Introduction {#introduction} + + +### Relaxation Isolator {#relaxation-isolator} + + +#### Electromagnetic Realization {#electromagnetic-realization} + + +### Active Isolation {#active-isolation} + + +#### Sky-Hook Damper {#sky-hook-damper} + + +#### Integral Force Feedback {#integral-force-feedback} + + +### Flexible Body {#flexible-body} + + +#### Free-Free Beam with Isolator {#free-free-beam-with-isolator} + + +### Payload Isolation in Spacecraft {#payload-isolation-in-spacecraft} + + +#### Interaction Isolator/Attitude Control {#interaction-isolator-attitude-control} + + +#### Gough–Stewart Platform {#gough-stewart-platform} + + +### Six-Axis Isolator {#six-axis-isolator} + + +#### Relaxation Isolator {#relaxation-isolator} + + +#### Integral Force 
Feedback {#integral-force-feedback} + + +#### Spherical Joints, Modal Spread {#spherical-joints-modal-spread} + + +### Active Versus Passive {#active-versus-passive} + + +### Car Suspension {#car-suspension} + + +## State Space Approach {#state-space-approach} + + +### Introduction {#introduction} + + +### State Space Description {#state-space-description} + + +#### Single Degree of Freedom Oscillator {#single-degree-of-freedom-oscillator} + + +#### Flexible Structure {#flexible-structure} + + +#### Inverted Pendulum {#inverted-pendulum} + + +### System Transfer Function {#system-transfer-function} + + +#### Poles and Zeros {#poles-and-zeros} + + +### Pole Placement by State Feedback {#pole-placement-by-state-feedback} + + +#### Example: Oscillator {#example-oscillator} + + +### Linear Quadratic Regulator {#linear-quadratic-regulator} + + +#### Symmetric Root Locus {#symmetric-root-locus} + + +#### Inverted Pendulum {#inverted-pendulum} + + +### Observer Design {#observer-design} + + +### Kalman Filter {#kalman-filter} + + +#### Inverted Pendulum {#inverted-pendulum} + + +### Reduced-Order Observer {#reduced-order-observer} + + +#### Oscillator {#oscillator} + + +#### Inverted Pendulum {#inverted-pendulum} + + +### Separation Principle {#separation-principle} + + +### Transfer Function of the Compensator {#transfer-function-of-the-compensator} + + +#### The Two-Mass Problem {#the-two-mass-problem} + + +## Analysis and Synthesis in the Frequency Domain {#analysis-and-synthesis-in-the-frequency-domain} + + +### Gain and Phase Margins {#gain-and-phase-margins} + + +### Nyquist Criterion {#nyquist-criterion} + + +#### Cauchy's Principle {#cauchy-s-principle} + + +#### Nyquist Stability Criterion {#nyquist-stability-criterion} + + +### Nichols Chart {#nichols-chart} + + +### Feedback Specification for SISO Systems {#feedback-specification-for-siso-systems} + + +#### Sensitivity {#sensitivity} + + +#### Tracking Error {#tracking-error} + + +#### Performance Specification 
{#performance-specification} + + +#### Unstructured Uncertainty {#unstructured-uncertainty} + + +#### Robust Performance and Robust Stability {#robust-performance-and-robust-stability} + + +### Bode Gain–Phase Relationships {#bode-gain-phase-relationships} + + +### The Bode Ideal Cutoff {#the-bode-ideal-cutoff} + + +### Non-minimum Phase Systems {#non-minimum-phase-systems} + + +### Usual Compensators {#usual-compensators} + + +#### System Type {#system-type} + + +#### Lead Compensator {#lead-compensator} + + +#### PI Compensator {#pi-compensator} + + +#### Lag Compensator {#lag-compensator} + + +#### PID Compensator {#pid-compensator} + + +### Multivariable Systems {#multivariable-systems} + + +#### Performance Specification {#performance-specification} + + +#### Small Gain Theorem {#small-gain-theorem} + + +#### Stability Robustness Tests {#stability-robustness-tests} + + +#### Residual Dynamics {#residual-dynamics} + + +## Optimal Control {#optimal-control} + + +### Introduction {#introduction} + + +### Quadratic Integral {#quadratic-integral} + + +### Deterministic LQR {#deterministic-lqr} + + +### Stochastic Response to a White Noise {#stochastic-response-to-a-white-noise} + + +#### Remark {#remark} + + +### Stochastic LQR {#stochastic-lqr} + + +### Asymptotic Behavior of the Closed Loop {#asymptotic-behavior-of-the-closed-loop} + + +### Prescribed Degree of Stability {#prescribed-degree-of-stability} + + +### Gain and Phase Margins of the LQR {#gain-and-phase-margins-of-the-lqr} + + +### Full State Observer {#full-state-observer} + + +#### Covariance of the Reconstruction Error {#covariance-of-the-reconstruction-error} + + +### Kalman Filter (KF) {#kalman-filter--kf} + + +### Linear Quadratic Gaussian (LQG) {#linear-quadratic-gaussian--lqg} + + +### Duality {#duality} + + +### Spillover {#spillover} + + +#### Spillover Reduction {#spillover-reduction} + + +### Loop Transfer Recovery (LTR) {#loop-transfer-recovery--ltr} + + +### Integral Control with State 
Feedback {#integral-control-with-state-feedback} + + +### Frequency Shaping {#frequency-shaping} + +Weakness of LQG: + +- use frequency independant cost function +- use noise statistics with uniform distribution + +To overcome the weakness => frequency shaping either by: + +- considering a frequency dependant cost function +- using colored noise statistics + + +#### Frequency-Shaped Cost Functionals {#frequency-shaped-cost-functionals} + + +#### Noise Model {#noise-model} + + +## Controllability and Observability {#controllability-and-observability} + + +### Introduction {#introduction} + + +#### Definitions {#definitions} + + +### Controllability and Observability Matrices {#controllability-and-observability-matrices} + + +### Examples {#examples} + + +#### Cart with Two Inverted Pendulums {#cart-with-two-inverted-pendulums} + + +#### Double Inverted Pendulum {#double-inverted-pendulum} + + +#### Two d.o.f. Oscillator {#two-d-dot-o-dot-f-dot-oscillator} + + +### State Transformation {#state-transformation} + + +#### Control Canonical Form {#control-canonical-form} + + +#### Left and Right Eigenvectors {#left-and-right-eigenvectors} + + +#### Diagonal Form {#diagonal-form} + + +### PBH Test {#pbh-test} + + +### Residues {#residues} + + +### Example {#example} + + +### Sensitivity {#sensitivity} + + +### Controllability and Observability Gramians {#controllability-and-observability-gramians} + + +### Internally Balanced Coordinates {#internally-balanced-coordinates} + + +### Model Reduction {#model-reduction} + + +#### Transfer Equivalent Realization {#transfer-equivalent-realization} + + +#### Internally Balanced Realization {#internally-balanced-realization} + + +#### Example {#example} + + +## Stability {#stability} + + +### Introduction {#introduction} + + +#### Phase Portrait {#phase-portrait} + + +### Linear Systems {#linear-systems} + + +#### Routh--Hurwitz Criterion {#routh-hurwitz-criterion} + + +### Lyapunov's Direct Method {#lyapunov-s-direct-method} + + 
+#### Introductory Example {#introductory-example} + + +#### Stability Theorem {#stability-theorem} + + +#### Asymptotic Stability Theorem {#asymptotic-stability-theorem} + + +#### Lasalle's Theorem {#lasalle-s-theorem} + + +#### Geometric Interpretation {#geometric-interpretation} + + +#### Instability Theorem {#instability-theorem} + + +### Lyapunov Functions for Linear Systems {#lyapunov-functions-for-linear-systems} + + +### Lyapunov's Indirect Method {#lyapunov-s-indirect-method} + + +### An Application to Controller Design {#an-application-to-controller-design} + + +### Energy Absorbing Controls {#energy-absorbing-controls} + + +## Applications {#applications} + + +### Digital Implementation {#digital-implementation} + + +#### Sampling, Aliasing, and Prefiltering {#sampling-aliasing-and-prefiltering} + + +#### Zero-Order Hold, Computational Delay {#zero-order-hold-computational-delay} + + +#### Quantization {#quantization} + + +#### Discretization of a Continuous Controller {#discretization-of-a-continuous-controller} + + +### Active Damping of a Truss Structure {#active-damping-of-a-truss-structure} + + +#### Actuator Placement {#actuator-placement} + + +#### Implementation, Experimental Results {#implementation-experimental-results} + + +### Active Damping Generic Interface {#active-damping-generic-interface} + + +#### Active Damping {#active-damping} + + +#### Experiment {#experiment} + + +#### Pointing and Position Control {#pointing-and-position-control} + + +### Active Damping of a Plate {#active-damping-of-a-plate} + + +#### Control Design {#control-design} + + +### Active Damping of a Stiff Beam {#active-damping-of-a-stiff-beam} + + +#### System Design {#system-design} + + +### The HAC/LAC Strategy {#the-hac-lac-strategy} + +In active structures for precision engineering applications, the control system is used to reduce the effect of transient and steady-state disturbances on the controlled variables. 
+Active damping is very effective in reducing the settling time of transient disturbances and the effect of steady state disturbances near the resonance frequencies of the system; however, away from the resonances, the active damping is completely ineffective and leaves the closed-loop response essentially unchanged.
+Such low-gain controllers are often called Low Authority Controllers (LAC), because they modify the poles of the system only slightly.
+
+To attenuate wide-band disturbances, the controller needs larger gains, in order to cause more substantial modifications to the poles of the open-loop system; this is the reason why they are often called High Authority Controllers (HAC).
+Their design requires a model of the structure, and there is usually a trade-off between the conflicting requirements of performance-bandwidth and stability in the face of parametric uncertainty and unmodelled dynamics.
+
+When collocated actuator/sensor pairs can be used, stability can be achieved using positivity concepts, but in many situations, collocated pairs are not feasible for HAC.
+
+The HAC/LAC approach consists of combining the two approaches in a dual-loop control as shown in Figure [fig:hac_lac_control](#fig:hac_lac_control).
+The inner loop uses a set of collocated actuator/sensor pairs for decentralized active damping with guaranteed stability; the outer loop consists of a non-collocated HAC based on a model of the actively damped structure.
+This approach has the following advantages:
+
+- The active damping extends outside the bandwidth of the HAC and reduces the settling time of the modes which are outside the bandwidth
+- The active damping makes it easier to gain-stabilize the modes outside the bandwidth of the output loop (improved gain margin)
+- The larger damping of the modes within the controller bandwidth makes them more robust to the parametric uncertainty (improved phase margin)
+
+
+
+{{< figure src="/ox-hugo/preumont18_hac_lac_control.png" caption="Figure 21: Principle of the dual-loop HAC/LAC control" >}}
+
+
+#### Wide-Band Position Control {#wide-band-position-control}
+
+
+#### Compensator Design {#compensator-design}
+
+
+#### Results {#results}
+
+
+### Vibroacoustics: Volume Displacement Sensors {#vibroacoustics-volume-displacement-sensors}
+
+
+#### QWSIS Sensor {#qwsis-sensor}
+
+
+#### Discrete Array Sensor {#discrete-array-sensor}
+
+
+#### Spatial Aliasing {#spatial-aliasing}
+
+
+#### Distributed Sensor {#distributed-sensor}
+
+
+## Tendon Control of Cable Structures {#tendon-control-of-cable-structures}
+
+
+### Introduction {#introduction}
+
+
+### Tendon Control of Strings and Cables {#tendon-control-of-strings-and-cables}
+
+
+### Active Damping Strategy {#active-damping-strategy}
+
+
+### Basic Experiment {#basic-experiment}
+
+
+### Linear Theory of Decentralized Active Damping {#linear-theory-of-decentralized-active-damping}
+
+
+### Guyed Truss Experiment {#guyed-truss-experiment}
+
+
+### Microprecision Interferometer Testbed {#microprecision-interferometer-testbed}
+
+
+### Free-Floating Truss Experiment {#free-floating-truss-experiment}
+
+
+### Application to Cable-Stayed Bridges {#application-to-cable-stayed-bridges}
+
+
+#### Laboratory Experiment {#laboratory-experiment}
+
+
+#### Control of Parametric Resonance {#control-of-parametric-resonance}
+
+
+#### Large Scale Experiment {#large-scale-experiment}
+
+
+### Application to Suspension Bridges
{#application-to-suspension-bridges} + + +#### Footbridge {#footbridge} + + +#### Laboratory Experiment {#laboratory-experiment} + + +## Active Control of Large Telescopes: Adaptive Optics {#active-control-of-large-telescopes-adaptive-optics} + + +### Introduction {#introduction} + + +#### Wavefront Sensor {#wavefront-sensor} + + +#### Zernike Modes {#zernike-modes} + + +#### Fried Length, Seeing {#fried-length-seeing} + + +#### Kolmogorov Turbulence Model {#kolmogorov-turbulence-model} + + +#### Strehl Ratio {#strehl-ratio} + + +#### Power Spectral Density of the Zernike Modes {#power-spectral-density-of-the-zernike-modes} + + +### Deformable Mirror for Adaptive Optics {#deformable-mirror-for-adaptive-optics} + + +#### Stoney Formula {#stoney-formula} + + +#### Stroke Versus Natural Frequency {#stroke-versus-natural-frequency} + + +### Feedback Control of an AO Mirror {#feedback-control-of-an-ao-mirror} + + +#### Quasi-static Control {#quasi-static-control} + + +#### Control of the Mirror Based on the Jacobian {#control-of-the-mirror-based-on-the-jacobian} + + +#### Control of Zernike Modes {#control-of-zernike-modes} + + +### Dynamic Response of the AO Mirror {#dynamic-response-of-the-ao-mirror} + + +#### Dynamic Model of the Mirror {#dynamic-model-of-the-mirror} + + +#### Control-Structure Interaction {#control-structure-interaction} + + +#### Passive Damping {#passive-damping} + + +#### Active Damping {#active-damping} + + +### Miscellaneous {#miscellaneous} + + +#### Segmented AO Mirror {#segmented-ao-mirror} + + +#### Initial Curvature of the AO Mirror {#initial-curvature-of-the-ao-mirror} + + +## Active Control of Large Telescopes: Active Optics {#active-control-of-large-telescopes-active-optics} + + +### Introduction {#introduction} + + +### Monolithic Primary Mirror {#monolithic-primary-mirror} + + +### Segmented Primary Mirror {#segmented-primary-mirror} + + +### SVD Controller {#svd-controller} + + +#### Loop Shaping of the SVD Controller 
{#loop-shaping-of-the-svd-controller} + + +### Dynamics of a Segmented Mirror {#dynamics-of-a-segmented-mirror} + + +### Control-Structure Interaction {#control-structure-interaction} + + +#### SISO System {#siso-system} + + +#### MIMO System {#mimo-system} + + +#### Spillover Alleviation {#spillover-alleviation} + + +### Scaling Rules {#scaling-rules} + + +#### Static Deflection Under Gravity {#static-deflection-under-gravity} + + +#### First Resonance Frequency {#first-resonance-frequency} + + +#### Control Bandwidth {#control-bandwidth} + + +## Adaptive Thin Shell Space Reflectors {#adaptive-thin-shell-space-reflectors} + + +### Introduction {#introduction} + + +### Adaptive Plates Versus Adaptive Shells {#adaptive-plates-versus-adaptive-shells} + + +### Adaptive Spherical Shell {#adaptive-spherical-shell} + + +### Quasi-static Control: Hierarchical Approach {#quasi-static-control-hierarchical-approach} + + +### Petal Configuration {#petal-configuration} + + +### MATS Demonstrator {#mats-demonstrator} + + +#### Manufacturing of the Demonstrator {#manufacturing-of-the-demonstrator} + + +## Semi-active Control {#semi-active-control} + + +### Introduction {#introduction} + + +### Magneto-Rheological Fluids {#magneto-rheological-fluids} + + +### MR Devices {#mr-devices} + + +### Semi-active Suspension {#semi-active-suspension} + + +#### Semi-active Devices {#semi-active-devices} + + +### Narrow-Band Disturbance {#narrow-band-disturbance} + + +#### Quarter-Car Semi-active Suspension {#quarter-car-semi-active-suspension} + + +### Problems {#problems} + +# Bibliography +Preumont, A., *Vibration control of active structures - fourth edition* (2018), : Springer International Publishing. 
[↩](#454500a3af67ef66a7a754d1f2e1bd4a)
diff --git a/content/book/schmidt14_desig_high_perfor_mechat_revis_edition.md b/content/book/schmidt14_desig_high_perfor_mechat_revis_edition.md
new file mode 100644
index 0000000..19b29c3
--- /dev/null
+++ b/content/book/schmidt14_desig_high_perfor_mechat_revis_edition.md
@@ -0,0 +1,37 @@
++++
+title = "The design of high performance mechatronics - 2nd revised edition"
+author = ["Thomas Dehaeze"]
+draft = false
++++
+
+Tags
+: [Reference Books]({{< relref "reference_books" >}})
+
+Reference
+: (Schmidt {\it et al.}, 2014)
+
+Author(s)
+: Schmidt, R. M., Schitter, G., & Rankers, A.
+
+Year
+: 2014
+
+Section 2.2 Mechanics
+
+> The core of a mechatronic system is its mechanical construction and in spite of many decades of excellent designs, optimizing the mechanical structure in strength, mass and endurance, the mechanical behavior will always remain the limiting factor of the performance of any mechatronic system.
+
+Section 2.2.2 Force and Motion
+
+> _Statics_ deals with the stress levels that are present in the mechanical system when (quasi-)static forces are exerted on it.
+> It analyses the linear and non-linear strain effects that are caused by elastic and plastic deformation under these stress levels.
+>
+> _Dynamics_ deals with the behaviour of the mechanical system under changing forces, while often the effects are linearised and limited to strain levels well below any irreversible plastic deformation.
+> One should however be aware that another non-destructive source of non-linearity is found in a third important field of mechanics, called _kinematics_.
+> The relation between angles and positions is often non-linear in such a mechanism, because of the changing angles, and controlling these often requires special precautions to overcome the inherent non-linearities by linearisation around actual position and adapting the optimal settings of the controller to each position.
+ +Section 9.3: Mass Dilemma + +> A reduced mass requires improved system dynamics that enable a higher control bandwidth to compensate for the increase sensitivity for external vibrations. + +# Bibliography +Schmidt, R. M., Schitter, G., & Rankers, A., *The design of high performance mechatronics - 2nd revised edition* (2014), : Ios Press. [↩](#37468bbe5988cc7f4878fcd664d9cb7f) diff --git a/content/book/skogestad07_multiv_feedb_contr.md b/content/book/skogestad07_multiv_feedb_contr.md new file mode 100644 index 0000000..fed3d13 --- /dev/null +++ b/content/book/skogestad07_multiv_feedb_contr.md @@ -0,0 +1,6269 @@ ++++ +title = "Multivariable feedback control: analysis and design" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Reference Books]({{< relref "reference_books" >}}), [Multivariable Control]({{< relref "multivariable_control" >}}) + +Reference +: (Skogestad \& Postlethwaite, 2007) + +Author(s) +: Skogestad, S., & Postlethwaite, I. + +Year +: 2007 + + +## Introduction {#introduction} + + + + +### The Process of Control System Design {#the-process-of-control-system-design} + +The process of designing a control system is a step by step design procedure as follows: + +1. Study the system (plant) to be controlled and obtain initial information about the **control objectives** +2. **model the system** and simplify the model, if necessary +3. **scale the variables** and analyze the resulting model; determine its properties +4. Decide which variables are to be controlled (controlled outputs) +5. Decide on the measurements and manipulated variables: what sensors and actuators will be used and where will they be placed? +6. Select the **control configuration** +7. Decide on the type of controller to be used +8. Decide on performance specifications, based on the overall control objectives +9. Design a controller +10. 
Analyze the resulting controlled system to see if the specifications are satisfied; and if they are not satisfied modify the specifications or the type of controller +11. Simulate the resulting controlled system +12. Repeat from step 2 if necessary +13. Choose hardware and software and implement the controller +14. Test and validate the control system, and tune the controller on-line, if necessary + +Input-output controllability analysis is studied in section [sec:perf_limit_siso](#sec:perf_limit_siso) for SISO systems and in section [sec:perf_limit_mimo](#sec:perf_limit_mimo) for MIMO systems. +The steps 4, 5, 6 and 7 are corresponding to the **control structure design**. This is treated in section [sec:controller_structure_design](#sec:controller_structure_design). +The design of the controller is described in section [sec:controller_design](#sec:controller_design). +The analysis of performance and robustness of a controlled system is studied in sections [sec:uncertainty_robustness_siso](#sec:uncertainty_robustness_siso) and [sec:robust_perf_mimo](#sec:robust_perf_mimo). + + +### The Control Problem {#the-control-problem} + +The objective of a control system is to make the output \\(y\\) behave in a desired way by manipulating the plant input \\(u\\). +The **regulator problem** is to manipulate \\(u\\) to counteract the effect of a disturbance \\(d\\). +The **servo problem** is to manipulate \\(u\\) to keep the output close to a given reference input \\(r\\).
+ +In both cases, we want the control error \\(e = y - r\\) to be small. +The algorithm for adjusting \\(u\\) based on \\(y\\) is the **controller** \\(K\\). +To arrive at a good design for \\(K\\) we need information about the expected disturbances, the reference inputs, the plant model \\(G\\) and disturbance model \\(G\_d\\).
+ +A major source of difficulty is that models may be inaccurate or may change with time. +The inaccuracy in \\(G\\) may cause instability problems as it is part of the feedback loop. +To deal with such a problem, the concept of **model uncertainty** will be used.
+ +
+
+ +**Nominal Stability (NS)** +: The system is stable with no model uncertainty + +**Nominal Performance (NP)** +: The system satisfies the performance specifications with no model uncertainty + +**Robust Stability (RS)** +: The system is stable for all perturbed plants about the nominal model up to the worst case uncertainty + +**Robust Performance (RP)** +: The system satisfies the performance specifications for all perturbed plants about the nominal model up to the worst-case model uncertainty + +
+ + +### Transfer Functions {#transfer-functions} + +Properties of transfer functions: + +- A system \\(G(s)\\) is **strictly proper** if \\(G(s) \rightarrow 0\\) as \\(\w \rightarrow \infty\\) +- A system \\(G(s)\\) is **semi-proper** if \\(G(s) \rightarrow D \ne 0\\) as \\(\w \rightarrow \infty\\) +- A system \\(G(s)\\) is **proper** if \\(G(s)\\) is strictly proper or semi-proper +- The order of the system noted \\(n\\) and is the order of the denominator (or pole polynomial) of its matrix transfer function + + +### Scaling {#scaling} + +Scaling is very important in applications, both for model analysis (input-output controllability) and for controller design.
+ +The scaling is done by **dividing each variable by its maximum expected or allowed change**. +That way, the scaled variable should be less than one in magnitude.
+ +We denote variables in their unscaled units by a hat. + +- \\(d = \hat{d}/D\_d\\) with \\(D\_d = \hat{d}\_{\max}\\) is the largest expected change in disturbance +- \\(u = \hat{u}/D\_u\\) with \\(D\_u = \hat{u}\_{\max}\\) is the largest allowed input change + +The variables \\(\hat{y}\\), \\(\hat{r}\\) and \\(\hat{e}\\) are in the same unit, so we choose to scale them with respect to the maximum allowed control error: + +- \\(e = \hat{e}/D\_e\\) with \\(D\_e = \hat{e}\_{\max}\\) is the largest allowed control error +- \\(r = \hat{r}/D\_e\\) +- \\(y = \hat{y}/D\_e\\) + +For MIMO systems, each variables in the vectors \\(\hat{d}\\), \\(\hat{r}\\), \\(\hat{u}\\) and \\(\hat{e}\\) may have a different maximum value, in which case \\(D\_e\\), \\(D\_u\\), \\(D\_s\\) and \\(D\_r\\), become diagonal scaling matrices. + +
+
+ +\begin{align\*} +G &= D\_e^{-1} \hat{G} D\_u\\\\\\ +G\_d &= D\_e^{-1} \hat{G\_d} D\_d +\end{align\*} + +
+ +We then obtain the following model in terms of scaled variables: +\\[ y = G u + G\_d d \\] +where \\(u\\) and \\(d\\) should be less than 1 in magnitude.
+ +It is sometimes useful to introduce a **scaled reference** \\(\tilde{r}\\) which is less than 1 in magnitude: \\(\tilde{r} = \hat{r}/\hat{r}\_{\max} = D\_r^{-1}\hat{r}\\) +Then we have \\(r = R \tilde{r}\\) with \\(R \triangleq D\_e^{-1}D\_r = \hat{r}\_{\max}/\hat{e}\_{\max}\\) is the largest expected change in reference relative to the allowed control error.
+ +With scaling you make initial decision regarding performance. This makes **weight selection simple later** (may often select identity weights if initial scaling is reasonable!). + + +### Deriving Linear Models {#deriving-linear-models} + +Linear models may be obtained from physical "first-principle" models or from analyzing input-output data (**identification**).
+ +In order to obtain a linear model from the "first-principle", the following approach is used: + +1. Formulate a nonlinear state-space model based on physical knowledge +2. Determine the steady-state operating point about which to linearize +3. Introduce deviation variables and linearize the model + + +### Notation {#notation} + +Notations used throughout this note are summarized in tables [table:notation_conventional](#table:notation_conventional), [table:notation_general](#table:notation_general) and [table:notation_tf](#table:notation_tf). + + +
+ Table 1: + Notations for the conventional control configuration +
+ +| Notation | Meaning | +|------------|-------------------| +| \\(G\\) | Plant model | +| \\(K\\) | Controller | +| \\(G\_d\\) | Disturbance model | +| \\(r\\) | Reference inputs | +| \\(n\\) | Measurement noise | +| \\(y\\) | Plant outputs | +| \\(y\_m\\) | Measurements | +| \\(u\\) | Control signals | + + +
+ Table 2: + Notations for the general configuration +
+ +| Notation | Meaning | +|----------|-------------------------------------------------| +| \\(P\\) | Generalized plant model | +| \\(w\\) | Exogenous inputs: commands, disturbances, noise | +| \\(z\\) | Exogenous outputs: signals to be minimized | +| \\(v\\) | Controller inputs: measurements | +| \\(u\\) | Control signals | + + +
+ Table 3: + Notations for transfer functions +
+
+| Notation | Meaning |
+|----------|---------------------------------------------------------------------|
+| \\(L\\) | Loop gain: \\(L = GK\\) |
+| \\(S\\) | Sensitivity function: \\(S = (I + L)^{-1}\\) |
+| \\(T\\) | Complementary sensitivity function: \\(T = L(I + L)^{-1}\\) |
+
+
+## Classical Feedback Control {#classical-feedback-control}
+
+
+
+
+### Frequency Response {#frequency-response}
+
+By replacing \\(s\\) by \\(j\omega\\) in a transfer function \\(G(s)\\), we get the **frequency response** description. It can be used to describe:
+
+- A system's response to sinusoids of varying frequency
+- The frequency content of a **deterministic signal** via the **Fourier transform**
+- The frequency distribution of a **stochastic** signal via the **power spectral density**
+
+After sending a sinusoidal signal through a system \\(G(s)\\), the signal's magnitude is amplified by a factor \\(\abs{G(j\omega)}\\) and its phase is shifted by \\(\angle{G(j\omega)}\\).
+ +
+
+ +**minimum phase systems** are systems with no time delays or RHP-zeros. + +The name minimum phase refers to the fact that such a system has the minimum possible phase lag for the given magnitude response \\(|G(j\omega)|\\). + +**RHP-zeros** and **time delays** contribute additional phase lag to a system when compare to that of a minimum phase system with the same gain (hence the term **non-minimum phase system**). + +
+ +For minimum phase systems, there is a unique relationship between the gain and phase of the frequency response: the **Bode gain-phase relationship**: + +\begin{equation} + \angle{G(j\w\_0)} = \frac{1}{\pi} \int\_{-\infty}^{\infty} \frac{d\ln{\abs{G(j\w)}}}{d\ln{\w}} \ln{\abs{\frac{\w+\w\_0}{\w-\w\_0}}} \frac{d\w}{\w} +\end{equation} + +We note \\(N(\w\_0) = \left( \frac{d\ln{|G(j\w)|}}{d\ln{\w}} \right)\_{\w=\w\_0}\\) that corresponds to the **slope of the magnitude** of \\(G(s)\\) in log-variables. We then have the following approximation of the **Bode gain-phase relationship**: + +\begin{equation} + \tcmbox{\angle{G(j\w\_0)} \approx \frac{\pi}{2} N(\w\_0)} +\end{equation} + + +### Feedback Control {#feedback-control} + + +#### One Degree-of-Freedom Controller {#one-degree-of-freedom-controller} + +The simple one degree-of-freedom controller negative feedback structure is represented in Fig. [fig:classical_feedback_alt](#fig:classical_feedback_alt). + +The input to the controller \\(K(s)\\) is \\(r-y\_m\\) where \\(y\_m = y+n\\) is the measured output and \\(n\\) is the measurement noise. +Thus, the input to the plant is \\(u = K(s) (r-y-n)\\). +The objective of control is to manipulate \\(u\\) (design \\(K\\)) such that the control error \\(e\\) remains small in spite of disturbances \\(d\\). +The control error is defined as \\(e = y-r\\). + + + +{{< figure src="/ox-hugo/skogestad07_classical_feedback_alt.png" caption="Figure 1: Configuration for one degree-of-freedom control" >}} + + +#### Closed-loop Transfer Functions {#closed-loop-transfer-functions} + +
+
+
+\begin{subequations}
+  \begin{align}
+    y &= T r + S G\_d d - T n\\\\\\
+    e &= -S r + S G\_d d - T n\\\\\\
+    u &= KS r - KS G\_d d - KS n
+  \end{align}
+\end{subequations}
+
+
+ + +#### Why Feedback? {#why-feedback} + +We could think that we can use a "perfect" feedforward controller \\(K\_r(s) = G^{-1}(s)\\) with \\(r-G\_d d\\) as the controller input: +\\[ y = G u + G\_d d = G K\_r (r - G\_d d) + G\_d d = r \\] +Unfortunately, \\(G\\) is never an exact model and the disturbances are never known exactly. + +
+
+ +- Signal uncertainty +- Unknown disturbance +- Model uncertainty +- An unstable plant + +
+ + +### Closed Loop Stability {#closed-loop-stability} + +Two methods are commonly used to **determine closed-loop stability**: + +1. The system is stable if and only if **all the closed-loop poles** (roots of \\(1 + L(s) = 0\\)) **are in the open LHP**. The poles are also equal to the **eigenvalues of the state-space \\(A\\) matrix** (this is how the poles are computed). +2. The frequency response of \\(L(j\w)\\) is plotted in the complex plane and the number of encirclement it makes around the critical point \\(-1\\) is counted. + - **Nyquist's stability criterion**: Closed-loop stability is inferred by equating the number of encirclement to the number of open-loop RHP-poles + - **Bode's stability condition**: The closed loop system is stable if and only if \\(\vert L(j \w\_{180})\vert < 1\\) where \\(\w\_{180}\\) is the phase crossover frequency defined by \\(\angle L(j \w\_{180})=\ang{-180}\\). This is only valid for open-loop stable systems where \\(\angle L(j\w)\\) falls with frequency and such that \\(\angle L(j\w)\\) crosses \\(\ang{-180}\\) only once. + +Method 1 is best suited for numerical calculation while method 2 has a nice graphical interpretation and may also be used for systems with time delays. +Moreover, method 2 provides useful measure of relative stability and will be used for robustness test. + + +### Evaluating Closed-Loop Performance {#evaluating-closed-loop-performance} + + +#### Gain Margin {#gain-margin} + +The **Gain Margin** is defined as: + +\begin{equation} + \tcmbox{\text{GM} = \frac{1}{|L(j\w\_{180})|}} +\end{equation} + +with \\(\w\_{180}\\) is the **phase crossover frequency** defined by \\(\angle L(j \w\_{180}) = \ang{-180}\\). +If there is more than one crossing (\\(\angle L(j \w\_{180}) = \ang{-180}\\)), the largest value of \\(\vert L(j\w\_{180})\vert\\) is taken. + +The GM is the factor by which the loop gain \\(\vert L(s)\vert\\) may be increased before the closed-loop system becomes unstable. 
+
+#### Phase Margin {#phase-margin}
+
+The **Phase Margin** is defined as:
+
+\begin{equation}
+  \tcmbox{\text{PM} = \angle L(j \w\_c) + \ang{180}}
+\end{equation}
+
+with \\(\w\_c\\) the **gain crossover frequency** defined by \\(\vert L(j \w\_c)\vert = 1\\).
+
+The PM tells how much negative phase (phase lag) we can add to \\(L(s)\\) at frequency \\(\omega\_c\\) before closed-loop instability appears.
+
+Typically, we require the PM to be larger than \\(\SI{30}{\degree}\\). This is a **safeguard against time delay uncertainty**, the system becomes unstable if we add a delay of \\(\theta\_{max} = PM / \w\_c\\).
+
+Note that by decreasing the value of \\(\omega\_c\\) (lowering the closed-loop bandwidth) the system can tolerate larger time delays.
+
+
+#### Maximum Peak Criteria {#maximum-peak-criteria}
+
+
+ +\begin{subequations} + \begin{align} + M\_S &= \max\_{\w} \abs{S(j\w)} = \hnorm{S}\\\\\\ + M\_T &= \max\_{\w} \abs{T(j\w)} = \hnorm{T} + \end{align} +\end{subequations} + +
+ +Typically, we require \\(M\_S < 2\ (6dB)\\) and \\(M\_T < 1.25\ (2dB)\\).
+ +**Why do we want \\(M\_S\\) small?** + +- Without feedback, with have \\(e = r - G\_d d\\) but with feedback \\(e = S(r - G\_d d)\\). Thus feedback improves performance in terms of reducing \\(|e|\\) where \\(|S|<1\\). However, we cannot avoid having \\(|S|>1\\) at some intermediate frequency where feedback control degrades performance. The value of \\(M\_S\\) is then a **measure of the worst-case performance degradation** +- \\(M\_S\\) is also a **measure of the robustness** because the smallest distance between \\(L(\w)\\) and the critical point \\(-1\\) is \\({M\_S}^{-1}\\) + +There is a close **relationship between these maximum peaks and the gain and phase margins**. +For a given value of \\(M\_S\\), we have: + +\begin{equation} + \tcmbox{\text{GM} \geq \frac{M\_S}{M\_S-1}; \quad \text{PM} \geq \frac{1}{M\_S}} +\end{equation} + +Example of guaranteed stability margins: + +- \\(M\_S < 2 \Rightarrow GM > 2\\) and \\(PM > \SI{29}{\degree}\\) +- \\(M\_T < 2 \Rightarrow GM > 1.5\\) and \\(PM > \SI{29}{\degree}\\) + + +#### Bandwidth and Crossover Frequency {#bandwidth-and-crossover-frequency} + +In general, a large bandwidth corresponds to a faster rise time, however, this also indicates an higher sensitivity to noise and to parameter variations. + +
+
+ +The bandwidth, is the frequency range \\([\w\_1, \w\_2]\\) over which control is **effective**. In most case we simple call \\(\w\_2 = \w\_B\\) the bandwidth. + +
+ +As the word "effective" may be interpreted in different ways, there are **multiple definitions of bandwidth**: + +- The **closed-loop bandwidth** \\(\w\_B\\) is the frequency where \\(\vert S(j\w)\vert\\) first crosses \\(1/\sqrt{2}\approx -3dB\\) from below. +- The **gain crossover frequency** \\(\w\_c\\) is defined as the frequency where \\(\vert L(j \w\_c)\vert\\) first crosses 1 from above +- The bandwidth in terms of \\(T\\), \\(\w\_{BT}\\), is the highest frequency at which \\(\vert T(j \w\_c)\vert\\) crosses \\(1/\sqrt{2}\approx -3dB\\) from above. + +For systems with \\(PM < \ang{90}\\), we have: \\(\w\_{B} <\w\_{c} < \w\_{BT}\\) +Then we have the following regions: + +- \\(\w < \w\_B\\): \\(|S|<0.7\\) and control is effective +- \\(\w\_B < \w < \w\_{BT}\\): we may have \\(|S| > 1\\) and control degrades performance +- \\(\w\_{BT} < \w\\): \\(|S| \approx 1\\) and control has no significant effect on the response + +The closed-loop time constant \\(\tau\_{\text{cl}}\\) can be related to the bandwidth: + +\begin{equation} + \tcmbox{\tau\_{\text{cl}} \approx \frac{1}{\w\_b}} +\end{equation} + + +### Controller Design {#controller-design} + +There is 3 mains approaches to controller design: + +1. **Shaping of transfer functions**. The designer specifies the magnitude of some transfer functions as a function of frequency and then finds a controller which gives the desired shape(s) + 1. Loop shaping of the open-loop transfer function \\(L(j\w)\\) + 2. Shaping of closed-loop transfer functions such as \\(S\\), \\(T\\) and \\(KS\\) +2. **The signal based approach**. This involves time domain problem formulations resulting in the minimization of a norm of a transfer function. Linear Quadratic Gaussian (LQG) is an example of a signal based approach. A signal based \\(\hinf\\) optimal control methodology can be derived. +3. **Numerical optimization**. 
This often involves multi-objective optimization where one attempts to optimize directly the true objectives such as rise times, stability margins, ... This problems may be difficult to solve, especially if one does not have convexity in the control parameters. This optimization may also be performed online. + + +### Loop Shaping {#loop-shaping} + + +#### Trade-offs in Terms of \\(L\\) {#trade-offs-in-terms-of--l} + +Let's consider a feedback control system with error \\(e = -S r + S G\_d d - T n\\). +If we want perfect control: + +- For **disturbance rejection** and **command tracking**, we obtain \\(S \approx 0\\), this implies that the loop transfer function \\(L\\) must be large in magnitude +- For zero **noise transmission**, we want \\(T \approx 0\\) or equivalently \\(S \approx I\\) which is obtained with \\(L \approx 0\\). + +This illustrate the **fundamental nature of feedback** design which always involves a **trade-off** between conflicting objectives. + +The most important design objectives are: + +Performance +: \\(L\\) large + +Good dist. rejection +: \\(L\\) large + +Limitation of meas. noise on plant output +: \\(L\\) small + +Small magnitude of input signal +: \\(K\\) and \\(L\\) small + +Strictly proper controller +: \\(K\rightarrow 0\\) at high frequencies + +Nominal stability +: \\(L\\) small (RHP zeros and time delays) + +Robust stability +: \\(L\\) small (neglected dynamics) + +Fortunately, the conflicting design objectives are generally in different frequency ranges, and we can meet most of the objectives by using large loop gain at low frequencies and a small gain at high frequencies above crossover. + + +#### Fundamentals of Loop-Shaping Design {#fundamentals-of-loop-shaping-design} + +
+
+ +Design procedure that involves explicitly shaping the magnitude of the loop transfer function \\(\abs{L(j\w)}\\). + +
+ +To get the benefits of feedback control, we want the loop gain \\(\abs{L(j\w)}\\) to be as large as possible within the bandwidth region. +However, due to time delays, RHP-zeros, unmodelled high-frequency dynamics and limitations on the allowed manipulated inputs, the loop gain has to drop below one at and above the crossover frequency \\(\w\_c\\). + +
+
+ +To measure how \\(\abs{L(j\w)}\\) falls with frequency, we consider the **logarithmic slope**: + +\begin{equation} + N = \frac{d \ln{\abs{L}}}{d \ln{\w}} +\end{equation} + +The value of \\(-N\\) at high frequencies is called the **roll-off rate**. + +
+ +To get a high bandwidth (fast response) we want \\(\w\_c\\) large (thus \\(\w\_{180}\\) large), that is we want the phase lag in \\(L\\) to be small. Unfortunately, that is not consistent with the desire that \\(\abs{L(j\w)}\\) should fall sharply (because of the approximation \\(\angle{L} \approx -N \* \SI{90}{\degree}\\)). + +The situation becomes even worse for cases with delays or RHP-zeros in \\(L(s)\\) which add undesirable phase lag without contributing to a desirable negative slope.
+ +We can define the **desired loop transfer function in terms of the following specifications**: + +1. The gain crossover frequency \\(\w\_c\\), where \\(\abs{L(j\w\_c)} = 1\\) +2. The shape of \\(\abs{L(j\w)}\\): + - Slope of \\(N=-1\\) around crossover + - Large roll-off at higher frequencies (\\(N>2\\)) + - Slope at low frequencies depending on the nature of the disturbance or reference signal. + We required a slope of \\(-1\\) for step changes and \\(-2\\) for ramp changes +3. The system type, defined as the number of pure integrators in \\(L(s)\\) + + +#### Limitations Imposed by RHP-zeros and Time Delays {#limitations-imposed-by-rhp-zeros-and-time-delays} + +We usually want the loop shape to have a slope of \\(-1\\) around crossover \\(\w\_c\\), then the phase lag of \\(L\\) at \\(\w\_c\\) will be at least \\(\SI{-90}{\degree}\\). +If we require a phase margin of \\(\SI{-35}{\degree}\\), then the additional phase contribution from delays and RHP zeros at \\(\w\_c\\) cannot exceed \\(\SI{-55}{\degree}\\). + +First consider a **time delay** \\(\theta\\) which adds a phase of \\(-\theta \omega\\). +Thus, we want \\(\theta \omega\_c < \SI{55}{\degree} \approx \SI{1}{\radian}\\). +The attainable bandwidth is limited by the time delay: + +\begin{equation} + \tcmbox{\omega\_c < 1/\theta} +\end{equation} + +Next consider a **RHP-zero** at \\(s = z\\). +To avoid an increase in slope cause by the zero, we add a pole at \\(s = -z\\), then \\(L\\) contains \\(\frac{-s+z}{s+z}\\) which corresponds to an all-pass filter. +The phase contribution is \\(\approx \SI{-55}{\degree}\\) at \\(\w = z/2\\). +Thus, this limits the attainable bandwidth: + +\begin{equation} + \tcmbox{\w\_c < z/2} +\end{equation} + + +#### Inverse-Based Controller Design {#inverse-based-controller-design} + +The idea is to have \\(L(s) = \frac{\w\_c}{s}\\) with \\(\w\_c\\) the desired gain crossover frequency. 
+The controller associated is then \\(K(s) = \frac{\w\_c}{s}G^{-1}(s)\\) {the plant is inverted and an integrator is added}. +This idea is the essential part of the **internal model control** (IMC). +This loop shape yields a phase margin of \\(\SI{90}{\degree}\\) and an infinite gain margin.
+ +They are many reasons why the inverse-based controller may **not** be a good choice: + +- The controller will not be realizable if \\(G(s)\\) has a pole excess of two or larger +- The loop shape is not generally desirable, unless the references and disturbances are steps + + +#### Loop Shaping for Disturbance Rejection {#loop-shaping-for-disturbance-rejection} + +We have \\(e = S G\_d d\\) with \\(\abs{d(j\w)} < 1\\) at each frequency (thanks to scaling). +The main control objective is to achieve \\(\abs{e(j\w)} < 1\\). +Then, we require: \\(\abs{S(j\w) G\_d(j\w)} < 1, \forall \w\\) or equivalently \\(\abs{1 + L(j\w)} > \abs{G\_d}, \forall \w\\).
+ +Note that we don't want to have larger loop gain than necessary to not increase input signals and sensitivity to noise. +A reasonable loop shape is then \\(\abs{L} = \abs{G\_d}\\). + +The corresponding controller satisfies + +\begin{equation} + \abs{K} = \abs{G^{-1}G\_d} +\end{equation} + +This means that: + +- For disturbances entering at the plant output (\\(G\_d = 1\\)), we get \\(\abs{K} = \abs{G^{-1}}\\) +- For disturbances entering at the plant input (\\(G\_d = G\\)), we get \\(\abs{K} = 1\\) +- Note that reference change may be viewed as a disturbance directly affecting the output + +The loop-shape \\(L(s)\\) may be modify as follows: + +- Around crossover, make the slope of \\(|L|\\) to be about -1. This is to achieve good transient behavior with acceptable gain and phase margins +- Improve the low frequency performance by adding integral action \\(\abs{K} = \abs{\frac{s+\w\_I}{s}}\abs{G^{-1}G\_d}\\) +- Let \\(L(s)\\) roll of faster at high frequencies in order to reduce the effect of noise and the input magnitude + + +#### Two Degrees-of-freedom Design {#two-degrees-of-freedom-design} + +For reference tracking, we typically want the controller to look like \\(\frac{1}{s} G^{-1}\\), whereas for disturbance rejection we want the controller to look like \\(\frac{1}{s} G^{-1}G\_d\\). + +We cannot achieve both of these simultaneously with a single feedback controller. + +The solution is to use a **two degrees of freedom controller** where the reference signal \\(r\\) and output measurement \\(y\_m\\) are independently treated by the controller (Fig. [fig:classical_feedback_2dof_alt](#fig:classical_feedback_2dof_alt)), rather than operating on their difference \\(r - y\_m\\). + + + +{{< figure src="/ox-hugo/skogestad07_classical_feedback_2dof_alt.png" caption="Figure 2: 2 degrees-of-freedom control architecture" >}} + +The controller can be slit into two separate blocks (Fig. 
[fig:classical_feedback_sep](#fig:classical_feedback_sep)): + +- the **feedback controller** \\(K\_y\\) that is used to **reduce the effect of uncertainty** (disturbances and model errors) +- the **prefilter** \\(K\_r\\) that **shapes the commands** \\(r\\) to improve tracking performance + + + +{{< figure src="/ox-hugo/skogestad07_classical_feedback_sep.png" caption="Figure 3: 2 degrees-of-freedom control architecture with two separate blocs" >}} + +It is optimal to design the combined two degrees of freedom controller \\(K\\) in one step, however, in practice \\(K\_y\\) is often designed first for disturbance rejection, and then \\(K\_r\\) is designed to improve reference tracking. + + +### Shaping Closed-Loop Transfer Functions {#shaping-closed-loop-transfer-functions} + +Specifications on the open-loop transfer function \\(L = GK\\) does not consider directly the closed-loop transfer functions, such as \\(S\\) and \\(T\\) which determine the final response. +An alternative design strategy is to directly shape the magnitude of the closed loop transfer functions. This strategy can be formulated as an \\(\hinf\\) optimal control problem. + + +#### The Terms \\(\hinf\\) and \\(\htwo\\) {#the-terms--hinf--and--htwo} + +The \\(\hinf\\) norm of a stable scalar transfer function \\(f(s)\\) is simply the peak value of \\(\abs{f(j\w)}\\) as a function of frequency: + +\begin{equation} + \tcmbox{\hnorm{f(s)} \triangleq \max\_{\w} \abs{f(j\w)}} +\end{equation} + +Similarly, the symbol \\(\htwo\\) stands for the Hardy space of transfer function with bounded 2-norm: + +\begin{equation} + \tcmbox{\normtwo{f(s)} \triangleq \left( \frac{1}{2\pi} \int\_{-\infty}^{\infty} \abs{f(j\w)}^2 d\w \right)^{1/2}} +\end{equation} + + +#### Weighted Sensitivity {#weighted-sensitivity} + +The sensitivity function \\(S\\) is a very good indicator of closed-loop performance. 
The main advantage of considering \\(S\\) is that we want \\(S\\) small and **it is sufficient to consider just its magnitude** \\(\abs{S}\\). + +
+
+ +- Minimum bandwidth frequency \\(\w\_B^\*\\) +- Maximum tracking error at selected freq. +- The maximum steady state tracking error \\(A\\) +- Shape of \\(S\\) over selected frequency ranges +- Maximum magnitude of \\(S\\): \\(\hnorm{S(j\w)} \leq M\\) + +
+ +The maximum peak specification prevents amplification of noise at high frequencies, and also introduces a margin of robustness. Typically, we select \\(M = 2\\).
+ +Mathematically, these specifications may be captured by an **upper bound** \\(1/\abs{W\_P(s)}\\) on the magnitude of \\(S\\) where \\(W\_P(s)\\) is a **weight** selected by the designer. +The subscript \\(P\\) stands for **performance** since \\(S\\) is mainly used as a performance indicator. + +The performance requirement becomes +\\[ \abs{S(j\w)} < 1/\abs{W\_P(j\w)}, \forall \w \\] +which can be expressed as an \\(\mathcal{H}\_\infty\\) norm constraint: + +\begin{equation} + \tcmbox{\hnorm{W\_P S} < 1} +\end{equation} + +
+
+ +\\[W\_P(s) = \frac{s/M + \w\_B^\*}{s + \w\_B^\* A}\\] + +With (see Fig. [fig:performance_weigth](#fig:performance_weigth)): + +- \\(M\\): maximum magnitude of \\(\abs{S}\\) +- \\(\w\_B\\): crossover frequency +- \\(A\\): steady-state offset + +
+ + + +{{< figure src="/ox-hugo/skogestad07_weight_first_order.png" caption="Figure 4: Inverse of performance weight" >}} + +If we want a steeper slope for \\(L\\) below the bandwidth, an higher order weight may be selected. A weight which ask for a slope of \\(-2\\) for \\(L\\) below crossover is: +\\[W\_P(s) = \frac{(s/M^{1/2} + \w\_B^\*)^2}{(s + \w\_B^\* A^{1/2})^2}\\] + + +#### Stacked Requirements: Mixed Sensitivity {#stacked-requirements-mixed-sensitivity} + +The specification \\(\hnorm{W\_P S} < 1\\) puts a lower bound on the bandwidth, but not an upper one and nor does it allow us to specify the roll-off of \\(L(s)\\) above the bandwidth. + +To do this, we can make demands on another closed-loop transfer function \\(T\\) by specifying an upper bound \\(1/\abs{W\_T}\\) on the magnitude \\(\abs{T}\\) to **make sure that \\(L\\) rolls off sufficiently fast at high frequencies**. + +Also, to achieve robustness or to restrict the magnitude of the input signal \\(u\\), one may place an upper bound \\(1/\abs{W\_U}\\) on the magnitude \\(KS\\).
+ +To combined these **mixed sensitivity specifications**, a **stacking approach** is usually used, resulting in the following overall specification: +\\[\maxw \maxsv(N(j\w)) < 1; \quad N = \colvec{W\_P S \\ W\_T T \\ W\_U KS}\\] + +After selecting the form of \\(N\\) and the weights, the \\(\hinf\\) optimal controller is obtained by solving the problem \\(\min\_K\hnorm{N(K)}\\). + + +## Introduction to Multivariable Control {#introduction-to-multivariable-control} + + + + +### Introduction {#introduction} + +The main difference between a SISO system and a MIMO system is the presence of **directions** in the latter. + +However, most of the ideas and techniques used for SISO systems may be extended to MIMO systems. +This is done by considering the **maximum singular value** instead of the absolute value. + +The **singular value decomposition** (SVD) provides a useful way of quantifying multivariable directionality. + +For MIMO systems the gain \\(\frac{\abs{Gd}}{\abs{d}}\\) (where \\(\abs{\cdot}\\) is some norm) is independent of the magnitude \\(\abs{d}\\) (like for SISO systems), but it does **depend on its direction**. + +A plant is said to be **ill-conditioned** if the gain depends strongly on the input direction. It is quantified by the **condition number** \\(\Gamma\\) (which is much larger than 1 for an ill-conditioned plant). + +For MIMO systems the order of the transfer functions matter, so in general: + +\begin{equation} + \tcmbox{GK \neq KG} +\end{equation} + +even when \\(G\\) and \\(K\\) are square matrices. + + +### Transfer Functions {#transfer-functions} + +
+
+ +The main rule for evaluating transfer functions is the **MIMO Rule**: Start from the output and write down the transfer functions as you meet them going to the input. If you exit a feedback loop then we get a term \\((I-L)^{-1}\\) where \\(L = GK\\) is the transfer function around the loop (gain going backwards). + +
+ + +#### Negative Feedback Control Systems {#negative-feedback-control-systems} + +For negative feedback system (Fig. [fig:classical_feedback_bis](#fig:classical_feedback_bis)), we define \\(L\\) to be the loop transfer function as seen when breaking the loop at the **output** of the plant: + +- \\(L = G K\\) +- \\(S \triangleq (I + L)^{-1}\\) is the transfer function from \\(d\_1\\) to \\(y\\) +- \\(T \triangleq L(I + L)^{-1}\\) is the transfer function from \\(r\\) to \\(y\\) + + + +{{< figure src="/ox-hugo/skogestad07_classical_feedback_bis.png" caption="Figure 5: Conventional negative feedback control system" >}} + +We define \\(L\_1\\) to be the loop transfer function as seen when breaking the loop at the **input** to the plant: + +- \\(L\_1 = K G\\) +- \\(S\_1 \triangleq (I + L\_1)^{-1}\\) +- \\(T\_1 \triangleq L\_1(I + L\_1)^{-1}\\) is the transfer function from \\(d\_2\\) to \\(-u\\) + + +### Multivariable Frequency Response Analysis {#multivariable-frequency-response-analysis} + + +#### Obtaining the Frequency Response from \\(G(s)\\) {#obtaining-the-frequency-response-from--gs} + +Consider the system \\(G(s)\\) with input \\(d(s)\\) and output \\(y(s)\\). +The element \\(g\_{ij}(j\w)\\) of the matrix \\(G\\) represents the sinusoidal response from the input \\(j\\) to output \\(i\\). + + +#### Directions in Multivariable Systems {#directions-in-multivariable-systems} + +For a SISO system, the gain at \\(\omega\\) is simply: + +\begin{equation} + \frac{|y(\w)|}{|d(\w)|} = \frac{|G(j\w)d(\w)|}{|d(\w)|} = |G(j\w)| +\end{equation} + +The gain depends on the frequency \\(\w\\) but it is independent of the input magnitude \\(|d(\w)|\\). + +For MIMO systems, we have to use norms to measure the amplitude of the inputs/outputs. 
+If we select vector 2-norm, the magnitude of the vector input signal is: +\\[ \normtwo{d(\w)} = \sqrt{\sum\_j |d\_j(\w)|^2} \\] + +The gain of the system is then: + +\begin{equation} + \frac{\normtwo{y(\w)}}{\normtwo{d(\w)}} = \frac{\normtwo{G(j\w)d(\w)}}{\normtwo{d(\w)}} = \frac{\sqrt{\sum\_j |y\_j(\w)|^2}}{\sqrt{\sum\_j |d\_j(\w)|^2}} +\end{equation} + +Again the gain depends on the frequency \\(\w\\) and again it is independent of the input magnitude \\(\normtwo{d(\w)}\\). However, the gain depends also on the **direction** of the input \\(d\\). + + +#### Eigenvalues as a Poor Measure of Gain {#eigenvalues-as-a-poor-measure-of-gain} + +The magnitudes of the eigenvalues of a transfer function matrix \\(\abs{\lambda\_i(G(j\w))}\\) do not provide a useful means of generalizing the SISO gain. +The main problem is that the eigenvalues measure the gain for the special case when the **inputs and the outputs are in the same direction**, namely in the direction of the eigenvectors. + + +#### Singular Value Decomposition {#singular-value-decomposition} + +We are interested by the physical interpretation of the SVD when applied to the frequency response of a MIMO system \\(G(s)\\) with \\(m\\) inputs and \\(l\\) outputs. + +
+
+ +\begin{equation} +G = U \Sigma V^H +\end{equation} + +\\(\Sigma\\) +: is an \\(l \times m\\) matrix with \\(k = \min\\{l, m\\}\\) non-negative **singular values** \\(\sigma\_i\\), arranged in descending order along its main diagonal, the other entries are zero. + +\\(U\\) +: is an \\(l \times l\\) unitary matrix. The columns of \\(U\\), denoted \\(u\_i\\), represent the **output directions** of the plant. They are orthonormal. + +\\(V\\) +: is an \\(m \times m\\) unitary matrix. The columns of \\(V\\), denoted \\(v\_i\\), represent the **input directions** of the plant. They are orthonormal. + +
+ +The input and output directions are related through the singular values: + +\begin{equation} + \tcmbox{G v\_i = \sigma\_i u\_i} +\end{equation} + +So, if we consider an input in the direction \\(v\_i\\), then the output is in the direction \\(u\_i\\). Furthermore, since \\(\normtwo{v\_i}=1\\) and \\(\normtwo{u\_i}=1\\), we see that **the singular value \\(\sigma\_i\\) directly gives the gain of the matrix \\(G\\) in this direction**.
+ +The **largest gain** for any input is equal to the **maximum singular value**: +\\[\maxsv(G) \equiv \sigma\_1(G) = \max\_{d\neq 0}\frac{\normtwo{Gd}}{\normtwo{d}} = \frac{\normtwo{Gv\_1}}{\normtwo{v\_1}} \\] +The **smallest gain** for any input direction is equal to the **minimum singular value**: +\\[\minsv(G) \equiv \sigma\_k(G) = \min\_{d\neq 0}\frac{\normtwo{Gd}}{\normtwo{d}} = \frac{\normtwo{Gv\_k}}{\normtwo{v\_k}} \\] + +We define \\(u\_1 = \bar{u}\\), \\(v\_1 = \bar{v}\\), \\(u\_k=\ubar{u}\\) and \\(v\_k = \ubar{v}\\). Then it follows that: +\\[ G\bar{v} = \maxsv \bar{u} ; \quad G\ubar{v} = \minsv \ubar{u} \\] + + +#### Non Square Plants {#non-square-plants} + +If the plant has more outputs than inputs, the output singular vectors \\(u\_i\\) with \\(i > k\\) correspond to the output directions that cannot be controlled. + +Similarly, for a plant with more inputs than outputs, the additional input singular vectors tell us in which directions the input will have no effect. + + +#### Singular Values for Performance {#singular-values-for-performance} + +The gain of the MIMO system from the vector of reference inputs \\(r\\) to the vector of control errors \\(e\\) is bounded by the minimum and maximum singular values of \\(S\\): +\\[ \minsv(S(j\w)) < \frac{\normtwo{e(\w)}}{\normtwo{r(\w)}} < \maxsv(S(j\w)) \\] + +In terms of performance, we require that the gain remains small for any direction of \\(r(\w)\\) including the "worst-case" direction corresponding to the gain \\(\maxsv(S(j\w))\\). Let \\(1/\abs{W\_P(j\w)}\\) represent the maximum allowed magnitude of \\(\frac{\normtwo{e(\w)}}{\normtwo{r(\w)}}\\) at each frequency: +\\[ \maxsv(S(j\w)) < \frac{1}{\abs{W\_P}}, \forall \w \Leftrightarrow \hnorm{W\_P S} < 1 \\] + +
+
+ +The \\(\hinf\\) norm is defined as the peak of the maximum singular value of the frequency response: + +\begin{equation} + \hnorm{M(s)} \triangleq \max\_{\w} \maxsv(M(j\w)) +\end{equation} + +
+ +For MIMO systems **the bandwidth depends on direction**. +If we want to associate a single bandwidth frequency for a multivariable system, then we consider the worst-case direction, and define the bandwidth \\(\w\_B\\) as the frequency where \\(\maxsv(S)\\) crosses \\(\frac{1}{\sqrt{2}} = 0.7\\) from below. + + +### Control of Multivariable Plants {#control-of-multivariable-plants} + +A conceptually simple approach to multivariable control is given by a two-step procedure: + +1. **Design a pre-compensator** \\(W\_1\\), which counteracts the interactions in the plant and results in a new **shaped plant** \\(G\_S(s) = G(s) W\_1(s)\\) which is **more diagonal and easier to control** than the original plant \\(G(s)\\). +2. **Design a diagonal controller** \\(K\_S(s)\\) for the shaped plant using methods similar to those for SISO systems. + +The overall controller is then: +\\[ K(s) = W\_1(s)K\_s(s) \\] + + +#### Decoupling {#decoupling} + +There are mainly three different cases: + +1. **Dynamic decoupling**: \\(G\_S(s)\\) is diagonal at all frequencies. For that we can choose \\(W\_1(s) = G^{-1}(s)\\) and this is an inverse-based controller. +2. **Steady-state decoupling**: \\(G\_S(0)\\) is diagonal. This can be obtained by selecting \\(W\_1(s) = G^{-1}(0)\\). +3. **Approximate decoupling at frequency \\(\w\_0\\)**: \\(G\_S(j\w\_0)\\) is as diagonal as possible. Decoupling the system at \\(\w\_0\\) is a good choice because the effect on performance of reducing interaction is normally greatest at this frequency. + +The idea of decoupling control is appealing, but there are **several difficulties**: + +1. It is very sensitive to modelling errors +2. It may not be required for disturbance rejection +3. If the plant has RHP-zero, the decoupling generally introduces extra RHP-zero in the closed-loop system + + +#### SVD-Controller {#svd-controller} + +We can also introduce a **post compensator** \\(W\_2(s)\\). 
+The shaped plant is then: +\\[G\_S(s) = W\_2(s)G(s)W\_1(s)\\] + +A diagonal controller \\(K\_S\\) can then be designed for the shaped plant. The overall controller is then: +\\[K(s) = W\_1(s)K\_S(s)W\_2(s)\\] + +The **SVD-controller** is a special case of a pre and post compensator design: \\(W\_1 = V\_0\\) and \\(W\_2 = U\_0^T\\). +\\(V\_0\\) and \\(U\_0\\) are obtained from a SVD of \\(G\_0 = U\_0 \Sigma\_0 V\_0^T\\) where \\(G\_0\\) is a real approximation of \\(G(j\w\_0)\\). + + +#### Decentralized Control {#decentralized-control} + +Another approach is to use a diagonal or block-diagonal controller \\(K(s)\\). This works well if \\(G(s)\\) is close to diagonal, because then the plant to be controlled is essentially a collection of independent sub-plants, and each element in \\(K(s)\\) may be designed independently. +However, if off-diagonal elements in \\(G(s)\\) are large, the performance with decentralized diagonal control may be poor because no attempt is made to counteract the interactions. + + +#### What is the Shape of the "best" Feedback Controller? {#what-is-the-shape-of-the-best-feedback-controller} + +Consider the problem of disturbance rejection: \\(y = S G\_d d\\) where \\(\normtwo{d}<1\\) and our performance requirement is that \\(\normtwo{y}<1\\) which is equivalent to requiring \\(\maxsv(SG\_d) < 1\\). + +However there is generally a trade-off between input usage and performance. The controller that minimize the input magnitude while meeting the performance requirement is the one that yields all singular values of \\(SG\_d\\) equal to 1, i.e. \\(\sigma\_i(SG\_d) = 1, \forall \w\\). This corresponds to: +\\[S\_{\text{min}} G\_d = U\_1\\] +Where \\(U\_1\\) is some all-pass transfer function (which at each frequency has all its singular values equal to 1). + +At frequencies where feedback is effective, we have \\(S\approx L^{-1}\\) and then \\(L\_{\text{min}} = GK\_{\text{min}} \approx G\_d U\_1^{-1}\\). 
+In conclusion, the controller and loop shape with the minimum gain will often look like: +\\[ K\_{\text{min}} \approx G^{-1} G\_d U\_2 \\] +where \\(U\_2 = U\_1^{-1}\\) is some all-pass transfer function matrix. + +We see that for disturbances entering at the plant inputs, \\(G\_d = G\\), we get \\(K\_{\text{min}} = U\_2\\), so a simple constant unit gain controller yields a good trade-off between output performance and input usage. + + +#### Summary of Mixed-Sensitivity \\(\hinf\\) Synthesis {#summary-of-mixed-sensitivity--hinf--synthesis} + +In the mixed-sensitivity \\(S/KS\\) problem, the objective is to minimize the \\(\hinf\\) norm of: + +\begin{equation} + N = \colvec{W\_P S \\ W\_U K S} +\end{equation} + +Here are some guidelines for the choice of the weights \\(W\_P\\) and \\(W\_U\\): + +- \\(KS\\) is the transfer function from \\(r\\) to \\(u\\), so for a system which has been scaled, a reasonable initial choice for the input weight is \\(W\_U = I\\) +- \\(S\\) is the transfer function from \\(r\\) to \\(-e = r-y\\). A common choice for the performance weight is \\(W\_P = \text{diag}\\{w\_{p\_i}\\}\\) with: + \\[ w\_{p\_i} = \frac{s/M\_i + \w\_{B\_i}^\*}{s + \w\_{B\_i}^\*A\_i}, \quad A\_i \ll 1 \\] + Selecting \\(A\_i \ll 1\\) ensures approximate integral action. + Often we select \\(M\_i\\) about 2 for all outputs, whereas \\(\w\_{B\_i}^\*\\) may be different for each output. + +For disturbance rejection, we may in some cases want a steeper slope for \\(w\_{P\_i}(s)\\) at low frequencies. +However it may be better to **consider the disturbances explicitly** by considering the \\(\hinf\\) norm of: + +\begin{equation} + N = \begin{bmatrix} + W\_P S & W\_P S G\_d \\\\\\ + W\_U K S & W\_U K S G\_d + \end{bmatrix} +\end{equation} + +We can also consider \\(T\\) which is the transfer function from \\(-n\\) to \\(y\\). 
To reduce the sensitivity to noise and uncertainty, we want \\(T\\) small at high frequencies, and so we may want **additional roll-off** in \\(L\\). +This can be achieved in several ways: + +- One approach is to add \\(W\_T T\\) to the stack for \\(N\\) where \\(W\_T = \text{diag}\\{w\_{T\_i}\\}\\) and \\(\abs{w\_{T\_i}}\\) is smaller than 1 at low frequencies and large at high frequencies +- A more direct approach is to **add high-frequency dynamics** \\(W\_1(s)\\) **to the plant model** to ensure that the resulting shaped plant, \\(G\_S=GW\_1\\) rolls off with the desired slope. We then obtain an \\(\hinf\\) optimal controller \\(K\_S\\) for this shaped plant, and finally include \\(W\_1(s)\\) in the controller \\(K=W\_1 K\_S\\) + + +### Introduction to MIMO RHP-Zeros {#introduction-to-mimo-rhp-zeros} + +Whereas the poles \\(p\\) of MIMO system \\(G\\) are essentially poles of elements of \\(G\\), the zeros are generally not the zeros of elements of \\(G\\). +However, for square MIMO plants, the poles and zeros are in most cases the poles and zeros of \\(\det G(s)\\). + +
+
+ +The zeros \\(z\\) of a MIMO system \\(G\\) are defined as the values \\(s=z\\) where \\(G(s)\\) loses rank. + +
+ +As for SISO systems, we find that **RHP-zeros impose fundamental limitations on control**. +Poles and zeros of MIMO systems have **directions**: + +- We can find the **direction of a zero** by looking at the direction in which the matrix \\(G(z)\\) has zero gain +- Pole direction is direction where \\(G(p)\\) is infinite + +It is generally possible to move the effect of RHP-zero to particular outputs. +If it is not, the zero is called a "**pinned zero**". + + +### Condition Number and RGA {#condition-number-and-rga} + + +#### Condition Number {#condition-number} + +
+
+ +We define the condition number of a matrix as the ratio between its maximum and minimum singular values: + +\begin{equation} + \gamma(G) \triangleq \maxsv(G)/\minsv(G) +\end{equation} + +
+ +A matrix with large condition number is said to be **ill-conditioned**. + +For a non-singular square matrix \\(\minsv(G)=1/\maxsv(G^{-1})\\), so \\(\gamma(G) = \maxsv(G) \maxsv(G^{-1})\\). +It then follows that the condition number is large if the product of the largest element in \\(G\\) and \\(G^{-1}\\) is large. + +Note that the condition number depends strongly on scaling. One might consider minimizing the condition number over all possible scalings. +This results in the **minimized or optimal condition number** which is defined by: + +\begin{equation} + \gamma^\*(G) = \min\_{D\_1,D\_2} \gamma(D\_1 G D\_2) +\end{equation} + +If the condition number is small, then the multivariable effects of uncertainty are not likely to be serious. +However if the condition number is large (say, larger than 10), then this may indicate control problems. + + +#### Relative Gain Array (RGA) {#relative-gain-array--rga} + +
+
+ +The relative gain array (RGA) for a non-singular square matrix \\(G\\) is a square matrix defined as: + +\begin{equation} + \text{RGA}(G) = \Lambda(G) \triangleq G \times G^{-T} +\end{equation} + +where \\(\times\\) is element-by-element multiplication + +
+ +In most cases, it is the value of the RGA at frequencies close to crossover which is most important. + +The RGA has interesting algebraic properties: + +- It is independent of input and output scaling +- Its rows and columns sum to one +- The sum-norm of the RGA \\(\\|\Lambda\\|\_\text{sum}\\) is close to the minimized condition number \\(\gamma^\*\\). + Plants with large RGA-elements are thus always ill-conditioned +- The RGA is the identity matrix if \\(G\\) is upper or lower triangular. It follows that \\(\Lambda - I\\) provides a **measure of two-way interactions** + +It also has a number of useful **control properties**: + +- Plants with large RGA-elements around the crossover frequency are fundamentally difficult to control because of sensitivity to input uncertainty +- If the sign of a RGA-element changes from \\(s=0\\) to \\(s=\infty\\), then there is a RHP-zero in \\(G\\) +- The definition of the RGA may be generalized to non-square matrices by using the pseudo inverse +- The **RGA-number** can be used as a measure of diagonal dominance: \\(\\|\Lambda(G)-I\\|\_{\text{sum}}\\) +- For decentralized control, we prefer pairing inputs and outputs for which the RGA-number at crossover frequencies is close to \\(0\\) + + +### Introduction to Robustness for MIMO Plants {#introduction-to-robustness-for-mimo-plants} + +Multivariable plants can show a sensitivity to uncertainty which is fundamentally different from what is possible in SISO systems. +It is possible to have excellent stability margins (GM and PM) when considering one loop at a time, but small simultaneous input gain errors can give instability. + +For SISO systems, we generally have that nominal performance and robust stability imply robust performance, but this is not the case for MIMO systems. 
+ +Although we have **useful indicators of robustness problems** (RGA-number, Sensitivity Peaks, etc), they provide no exact answer to whether a given source of uncertainty will yield instability or poor performance. +The **structured singular value** \\(\mu\\) is a tool for analyzing the effects of model uncertainty. + + +### General Control Problem Formulation {#general-control-problem-formulation} + +The general control problem formulation is represented in Fig. [fig:general_control_names](#fig:general_control_names). + + + +{{< figure src="/ox-hugo/skogestad07_general_control_names.png" caption="Figure 6: General control configuration" >}} + +
+
+ +Find a controller \\(K\\) which based on the information in \\(v\\), generates a control signal \\(u\\) which counteracts the influence of \\(w\\) on \\(z\\), thereby minimizing the closed-loop norm from \\(w\\) to \\(z\\). + +
+ + +#### Obtaining the Generalized Plant \\(P\\) {#obtaining-the-generalized-plant--p} + +We must first find a block diagram representation of the system and identify the signals \\(w\\), \\(z\\), \\(u\\) and \\(v\\). +Then we have to break all the "loops" entering and exiting the controller \\(K\\) to obtain \\(P\\) such that: + +\begin{equation} + \colvec{z\\v} = P \colvec{w\\u} +\end{equation} + + +#### Controller Design: Including Weights in \\(P\\) {#controller-design-including-weights-in--p} + +In order to get a meaningful controller synthesis problem, for example in terms of the \\(\hinf\\) norms, we generally have to include the weights \\(W\_z\\) and \\(W\_w\\) in the generalized plant \\(P\\) (Fig. [fig:general_plant_weights](#fig:general_plant_weights)). +We consider: + +- The weighted or normalized exogenous inputs \\(w\\) (where \\(\tilde{w} = W\_w w\\) consists of the "physical" signals entering the system) +- The weighted or normalized controlled outputs \\(z = W\_z \tilde{z}\\) (where \\(\tilde{z}\\) often consists of the control error \\(y-r\\) and the manipulated input \\(u\\)) + + + +{{< figure src="/ox-hugo/skogestad07_general_plant_weights.png" caption="Figure 7: General Weighted Plant" >}} + +The weighted matrices are usually frequency dependent and typically selected such that weighted signals \\(w\\) and \\(z\\) are of magnitude 1. + + +#### Partitioning the Generalized Plant \\(P\\) {#partitioning-the-generalized-plant--p} + +We often partition \\(P\\) as: + +\begin{equation} + \begin{bmatrix} z \\ v \end{bmatrix} = \begin{bmatrix} + P\_{11} & P\_{12} \\\\\\ + P\_{21} & P\_{22} + \end{bmatrix} \begin{bmatrix} w \\ u \end{bmatrix} +\end{equation} + +\\(P\_{22}\\) has dimensions compatible with the controller. + + +#### Analysis: Closing the Loop the get \\(N\\) {#analysis-closing-the-loop-the-get--n} + +In the previous representations, the controller \\(K\\) has a separate block. This is useful when **synthesizing** the controller. 
However, for **analysis** of closed-loop performance the controller is given, and we may absorb \\(K\\) into the interconnection structure and obtain the system \\(N\\). + +
+
+ +\begin{equation} + z = N w +\end{equation} + +\\(N\\) is given by: +\\[N = P\_{11} + P\_{12}K(I-P\_{22}K)^{-1}P\_{21} \triangleq F\_l(P, K) \\] +where \\(F\_l(P, K)\\) denotes a **lower linear fractional transformation** (LFT). + +
+ + +#### A General Control Configuration Including Model Uncertainty {#a-general-control-configuration-including-model-uncertainty} + +The general control configuration may be extended to include model uncertainty as shown in Fig. [fig:general_config_model_uncertainty](#fig:general_config_model_uncertainty). + + + +{{< figure src="/ox-hugo/skogestad07_general_control_Mdelta.png" caption="Figure 8: General control configuration for the case with model uncertainty" >}} + +The matrix \\(\Delta\\) is a block-diagonal matrix that includes all possible perturbations (representing uncertainty). +It is usually normalized in such a way that \\(\hnorm{\Delta} \leq 1\\). + + +### Conclusion {#conclusion} + +
+
+ +The **Singular Value Decomposition** (SVD) of the plant transfer function matrix provides insight into **multivariable directionality**.
+ +Other useful tools for analyzing directionality and interactions are the **condition number** and the **Relative Gain Array** (RGA).
+ +**Closed loop performance** may be analyzed in the frequency domain by evaluating the **maximum singular value of the sensitivity function** as the function of frequency.
+ +**Multivariable RHP-zeros** impose fundamental limitations on performance, but for MIMO systems we can often direct the undesired effect of a RHP-zero to a subset of the outputs.
+ +MIMO systems are often **more sensitive to uncertainty** than SISO systems. + +
+ + +## Elements of Linear System Theory {#elements-of-linear-system-theory} + + + + +### System Descriptions {#system-descriptions} + +For linear systems there are several alternative system representations: + +- **state-space representation** often follows directly from a physical model, and is used in most **numerical calculations**. +- **transfer function representation** is a nice compact representation which yields invaluable insights; it allows for series connections to be represented by multiplication of transfer functions. It also leads directly to the frequency response by setting \\(s = j\w\\). +- **coprime factorization** is a factorization into two stable systems, and that it is useful for representing the class of all stabilizing controllers. It forms the basis for the very useful coprime uncertainty description. + + +#### State-Space Representation {#state-space-representation} + +A natural way to represent many physical systems is by nonlinear state-space models of the form +\\[\dot{x} \triangleq \frac{dx}{dt} = f(x, u);\quad y = g(x, u)\\] + +Linear state-space models may then be derived from the linearization of such models. + +\begin{align\*} +\dot{x}(t) & = A x(t) + B u(t)\\\\\\ +y(t) & = C x(t) + D u(t) +\end{align\*} + +where \\(A\\), \\(B\\), \\(C\\) and \\(D\\) are real matrices. + +These equations may be rewritten as +\\[\colvec{\dot{x}\\y} = \begin{bmatrix} +A & B \\\\\\ +C & D +\end{bmatrix} \colvec{x\\u}\\] +which gives rise to the short-hand notation +\\[G = \left[ \begin{array}{c|c} +A & B \\ \hline +C & D \\\\\\ +\end{array} \right]\\] + +The state-space representation of a system is not unique, there exist realizations with the same input-output behavior, but with additional unobservable and/or uncontrollable state. + +
+
+ +A minimal realization is a realization with the **fewest number of states** and consequently **no unobservable or uncontrollable modes**. + +
+ +The state-space representation yields an internal description of the system which may be useful if the model is derived from physical principles. It is also more suitable for numerical calculations. + + +#### Impulse Response Representation {#impulse-response-representation} + +The impulse response matrix is +\\[g(t) = \begin{cases} +0 & t < 0 \\\\\\ +C e^{At} B + D \delta(t) & t \geq 0 +\end{cases}\\] +The \\(ij\\)'th element of the impulse response matrix, \\(g\_{ij}(t)\\), represents the response \\(y\_i(t)\\) to an impulse \\(u\_j(t)=\delta(t)\\) for a systems with a zero initial state. + +With initial state \\(x(0) = 0\\), the dynamic response to an arbitrary input \\(u(t)\\) is +\\[y(t) = g(t)\*u(t) = \int\_0^t g(t-\tau)u(\tau)d\tau\\] + + +#### Transfer Function Representation - Laplace Transforms {#transfer-function-representation-laplace-transforms} + +The transfer function representation is unique and is defined as the Laplace transform of the impulse response. + +
+
+ +\\[ G(s) = \int\_0^\infty g(t)e^{-st}dt \\] + +
+ +We can also obtain the transfer function representation from the state-space representation by taking the Laplace transform of the state-space equations +\\[ s x(s) = A x(s) + B u(s) \ \Rightarrow \ x(s) = (sI-A)^{-1} B u(s) \\] +\\[ y(s) = C x(s) + D u(s) \ \Rightarrow \ y(s) = \underbrace{\left(C(sI-A)^{-1}B+D\right)}\_{G(s)}u(s) \\] + +Time delays and improper systems can be represented by Laplace transforms, but do not have a state-space representation. + + +#### Coprime Factorization {#coprime-factorization} + +
+
+ +\\[G(s) = N\_r(s) M\_r^{-1}(s)\\] +where \\(N\_r(s)\\) and \\(M\_r(s)\\) are stable coprime transfer functions. + +
+ +The stability implies that \\(N\_r(s)\\) should contains all the RHP-zeros of \\(G(s)\\), and \\(M\_r(s)\\) should contain as RHP-zeros all the RHP-poles of \\(G(s)\\). +Mathematically, coprimeness means that there exist stable \\(U\_r(s)\\) and \\(V\_r(s)\\) such that the Bezout identity is satisfied: \\(U\_r N\_r + V\_r M\_r = I\\) + + +### State Controllability and State Observability {#state-controllability-and-state-observability} + +There are **many ways to check for state controllability and observability**, e.g. with Gramians, input/output pole vectors, controllability/observability matrix, etc. + + +##### Input and output pole vectors {#input-and-output-pole-vectors} + +The method which yields the most insight is probably to compute the input and output directions associated with each pole (mode). + +For the case when \\(A\\) has distinct eigenvalues, we have the following dyadic expansion of the transfer function matrix from inputs to outputs +\\[G(s) = \sum\_{i=1}^{n} \frac{C t\_i q\_i^H B}{s - \lambda\_i} + D = \sum\_{i=1}^{n} \frac{y\_{p\_i} u\_{p\_i}}{s - \lambda\_i} + D\\] + +- The \\(i\\)'th **input pole vector** \\(u\_{p\_i} \triangleq q\_i^H B\\) is an indication of how much the \\(i\\)'th mode is excited (and thus may be "controlled") by the inputs. +- The \\(i\\)'th **output pole vector** \\(y\_{p\_i} \triangleq C t\_i\\) indicates how much the \\(i\\)'th mode is observed in the outputs. + + +##### State Controllability {#state-controllability} + +Let \\(\lambda\_i\\) be the \\(i^{\text{th}}\\) eigenvalue of \\(A\\), \\(q\_i\\) the corresponding left eigenvector (\\(q\_i^H A = \lambda\_i q\_i^H\\)), and \\(u\_{p\_i} = B^H q\_i\\) the \\(i^{\text{th}}\\) input pole vector. Then the system \\((A, B)\\) is state controllable if and only if +\\[u\_{p\_i} \neq 0, \forall i\\] +That is if and only if all its input pole vectors are nonzero. 
+ + +##### State Observability {#state-observability} + +Let \\(\lambda\_i\\) be the \\(i^{\text{th}}\\) eigenvalue of \\(A\\), \\(t\_i\\) the corresponding right eigenvector (\\(A t\_i = \lambda\_i t\_i\\)), and \\(y\_{p\_i} = C t\_i\\) the \\(i^{\text{th}}\\) output pole vector. Then the system \\((A, C)\\) is state observable if and only if +\\[y\_{p\_i} \neq 0, \forall i\\] +That is if and only if all its output pole vectors are nonzero. + + +##### Minimal realization {#minimal-realization} + +A state space realization \\((A, B, C, D)\\) of \\(G(s)\\) is said to be a minimal realization of \\(G(s)\\) if \\(A\\) has the smallest possible dimension. The smallest dimension is called the **McMillan degree** of \\(G(s)\\). A mode is hidden if it is not state controllable or observable and thus does not appear in the minimal realization. +It follows that a state-space realization is minimal if and only if \\((A, B)\\) is state controllable and \\((A, C)\\) is state observable. + + +### Stability {#stability} + +
+
+ +A system is (internally) stable if none of its components contain hidden unstable modes and the injection of bounded external signals at any place in the system results in bounded output signals measured anywhere in the system. + +
+ +
+
+ +A system is (state) **stabilizable** if all unstable modes are state controllable. +A system is (state) **detectable** if all unstable modes are state observable. + +A system with unstabilizable or undetectable modes is said to contain hidden unstable modes. + +
+ + +### Poles {#poles} + +
+
+ +The poles \\(p\_i\\) of a system with state-space description are the **eigenvalues** \\(\lambda\_i(A), i=1, \dotsc, n\\) of the matrix \\(A\\). +The **pole or characteristic polynomial** \\(\phi(s)\\) is defined as \\(\phi(s) \triangleq \det(sI-A) = \Pi\_{i=1}^n (s-p\_i)\\). +Thus the poles are the roots of the characteristic equation +\\[\phi(s) \triangleq \det(sI-A) = 0\\] + +
+ + +#### Poles and Stability {#poles-and-stability} + +A linear dynamic system is **stable if and only if all the poles are in the LHP**, that is, \\(\text{Re}\\{\lambda\_i(A)\\} < 0, \forall i\\) + + +#### Poles from Transfer Functions {#poles-from-transfer-functions} + +The pole polynomial \\(\phi(s)\\) corresponding to a minimal realization of a system with transfer function \\(G(s)\\) is the **least common denominator** of all non-identically-zero minors of all orders of \\(G(s)\\). + +The poles are essentially the sum of the poles in the elements of the transfer function, but to get the correct multiplicity a more careful analysis is needed. + + +#### Pole Vectors and Directions {#pole-vectors-and-directions} + +In multivariable system poles have **directions** associated with them. To quantify this, we use the **input and output pole vectors**. + +
+
+ +\\[ u\_{p\_i} = B^H q\_i \\] +With \\(q\_i\\) the left eigenvector of \\(A\\) (\\({q\_i}^T A = \lambda\_i {q\_i}^T\\)). +The input pole direction is \\(\frac{1}{\normtwo{u\_{p\_i}}} u\_{p\_i}\\) + +
+ +
+
+ +\\[ y\_{p\_i} = C t\_i \\] +With \\(t\_i\\) the right eigenvector of \\(A\\) (\\(A t\_i = \lambda\_i t\_i\\)). +The output pole direction is \\(\frac{1}{\normtwo{y\_{p\_i}}} y\_{p\_i}\\) + +
+ +The pole directions may be defined in terms of the transfer function matrix by evaluating \\(G(s)\\) at the pole \\(p\_i\\) and considering the directions of the resulting complex matrix \\(G(p\_i)\\). The matrix is infinite in the direction of the pole, and we may write +\\[ G(p\_i) u\_{p\_i} = \infty \cdot y\_{p\_i} \\] +where \\(u\_{p\_i}\\) is the input pole direction and \\(y\_{p\_i}\\) is the output pole direction. + +The pole directions may in principle be obtained from an SVD of \\(G(p\_i) = U\Sigma V^H\\). +Then \\(u\_{p\_i}\\) is the first column in \\(V\\) (corresponding to the maximum singular value) and \\(y\_{p\_i}\\) the first column in \\(U\\). + +The pole direction is usually very interesting because it gives information about which output (or combination of outputs) may be difficult to control. + + +### Zeros {#zeros} + +Zeros of a system arise when competing effects, internal to the system, are such that the output is zero even when the inputs (and the states) are not themselves identically zero. + +
+
+ +\\(z\_i\\) is a zero of \\(G(s)\\) if the rank of \\(G(z\_i)\\) is less than the normal rank of \\(G(s)\\). +The zero polynomial is defined as \\(z(s) = \Pi\_{i=1}^{n\_z}(s-z\_i)\\) where \\(n\_z\\) is the number of finite zeros of \\(G(s)\\) + +
+ + +#### Zeros from State-Space Realizations {#zeros-from-state-space-realizations} + +The state-space equations of a system may be written as +\\[P(s) \colvec{x\\u} = \colvec{0\\y}, \quad P(s) = \begin{bmatrix} +sI-A & -B \\\\\\ +C & D \\\\\\ +\end{bmatrix}\\] + +The zeros are then the values \\(s=z\\) for which the polynomial system matrix, \\(P(s)\\), loses rank, resulting in zero output for some non-zero input. + + +#### Zeros from Transfer Functions {#zeros-from-transfer-functions} + +The zero polynomial \\(z(s)\\), corresponding to a minimal realization of the system, is the greatest common divisor of all the numerators of all order-\\(r\\) minors of \\(G(s)\\), where \\(r\\) is the normal rank of \\(G(s)\\), provided that these minors have been adjusted in such a way as to have the pole polynomial \\(\phi(s)\\) as their denominator. + +The zeros are values of \\(s\\) for which \\(G(s)\\) loses rank. In general, there is no relationship between the elements of the transfer function and its (multivariable) zeros. + + +#### Zero Directions {#zero-directions} + +Let \\(G(s)\\) have a zero at \\(s=z\\). Then \\(G(s)\\) loses rank at \\(s=z\\), and there will exist non-zero vectors \\(u\_z\\) and \\(y\_z\\) such that +\\[G(z) u\_z = 0 \cdot y\_z\\] +Here \\(u\_z\\) is defined as the **input zero direction** and \\(y\_z\\) is defined as the **output zero direction**. + +From a practical point of view, \\(y\_z\\) is usually of more interest than \\(u\_z\\) because it gives information about **which combination of outputs may be difficult to control**. + +Again, we may obtain input and output zero directions from an SVD of \\(G(z) = U\Sigma V^H\\): \\(u\_z\\) is the last column of \\(V\\) and \\(y\_z\\) is the last column of \\(U\\) (corresponding to the zero singular value of \\(G(z)\\)). + + +### Some Remarks on Poles and Zeros {#some-remarks-on-poles-and-zeros} + +- We should always find a **minimal realization** of the system **before computing the zeros**. 
+- For a square system \\(G(s)\\), the poles and zeros are _essentially_ the poles and zeros of \\(\det G(s)\\). +- Poles and zeros can occur at the same location, but their directions may be different so they do not cancel or otherwise interact with each other. +- If \\(G^{-1}(s)\\) exists, then the poles of \\(G(s)\\) are the zeros of \\(G^{-1}(s)\\) and vice versa (as for SISO systems). +- Zeros usually appear when there are fewer inputs or outputs than states or when \\(D \neq 0\\) +- **Moving poles and zeros**: + - **Feedback**: \\(G(I+GK)^{-1}\\). Poles (of \\(G\\)) are moved and zeros (of \\(G\\)) are unchanged (in addition we get as zeros the poles of \\(K\\)) + - **Series**: \\(GK\\). Poles and zeros are unchanged (with the exception of possible cancellations between poles and zeros in \\(G\\) and \\(K\\)) + - **Parallel**: \\(G+K\\). Poles are unchanged, zeros are moved (but note that physically a parallel interconnection requires an additional manipulated input) +- **Pinned zeros**. A zero is pinned to a subset of the outputs if \\(y\_z\\) has one or more elements equal to zero. Their effect cannot be moved freely to any output. Similarly, a zero is pinned to certain inputs if \\(u\_z\\) has one or more elements equal to zero. + +
+
+ +Consider a SISO negative feedback system with plant \\(G(s)=\frac{z(s)}{\phi(s)}\\) and a constant gain controller, \\(K(s)=k\\). The closed-loop response from reference \\(r\\) to output \\(y\\) is +\\[T(s) = \frac{kG(s)}{1+kG(s)} = \frac{kz(s)}{\phi(s)+kz(s)} = k\frac{z\_{\text{cl}}(s)}{\phi\_{\text{cl}}(s)}\\] + +We note that: + +- The zero locations are unchanged by feedback +- The pole locations are changed by feedback + +\begin{align\*} +\phi\_{\text{cl}}(s) &\underset{k \rightarrow 0}{\longrightarrow} \phi(s) \\\\\\ +\phi\_{\text{cl}}(s) &\underset{k \rightarrow \infty}{\longrightarrow} k z(s) +\end{align\*} + +That is, as we increase the feedback gain, **the closed loop poles move from the open-loop poles to the open-loop zeros**. +RHP-zeros therefore imply high gain instability. + +
+ + +### Internal Stability of Feedback Systems {#internal-stability-of-feedback-systems} + + + +{{< figure src="/ox-hugo/skogestad07_classical_feedback_stability.png" caption="Figure 9: Block diagram used to check internal stability" >}} + +Assume that the components \\(G\\) and \\(K\\) contain no unstable hidden modes. Then the feedback system in Fig. [fig:block_diagram_for_stability](#fig:block_diagram_for_stability) is **internally stable** if and only if all four closed-loop transfer matrices are stable. + +\begin{align\*} + &(I+KG)^{-1} & -K&(I+GK)^{-1} \\\\\\ +G&(I+KG)^{-1} & &(I+GK)^{-1} +\end{align\*} + +Assume there are no RHP pole-zero cancellations between \\(G(s)\\) and \\(K(s)\\), the feedback system in Fig. [fig:block_diagram_for_stability](#fig:block_diagram_for_stability) is internally stable if and only if **one** of the four closed-loop transfer function matrices is stable. + + +### Stabilizing Controllers {#stabilizing-controllers} + +The **Q-parameterization** is a parameterization that generates all controllers that yield internal stability of the closed loop transfer function. + +
+
+ +For stable plants, a parameterization of all stabilizing negative feedback controllers for the stable plant \\(G(s)\\) is given by +\\[K = (I-QG)^{-1} Q = Q(I-GQ)^{-1}\\] +where the parameter \\(Q\\) is any stable transfer function matrix. + +
+ +This may have significant advantages in controller synthesis where the objective is to find a \\(K\\) which minimizes some norm of \\(N(K)\\). +The search over stabilizing \\(K\\) (which involves checking the stability of closed-loop transfer functions) is replaced by a search over stable \\(Q\\). +The closed-loop transfer functions turn out to be affine in \\(Q\\), e.g. \\(S\\) or \\(T\\) can be written \\(H\_1 + H\_2 Q H\_3\\), which may significantly simplify the optimization (e.g. compared to \\(GK(I+GK)^{-1}\\) which is fractional in \\(K\\)). + + +### Stability Analysis in the Frequency Domain {#stability-analysis-in-the-frequency-domain} + +
+
+ +Let \\(P\_{ol}\\) denote the number of unstable poles in \\(L(s) = G(s)K(s)\\). The closed-loop system with loop transfer \\(L(s)\\) and negative feedback is stable if and only if the Nyquist plot of \\(\det(I+L(s))\\): + +1. makes \\(P\_{ol}\\) anti-clockwise encirclements of the origin +2. does not pass through the origin + +
+ +
+
+ +The spectral radius \\(\rho(L(j\w))\\) is defined as the maximum eigenvalue magnitude: +\\[ \rho(L(j\w)) \triangleq \max\_{i} \abs{\lambda\_i (L(j\w))} \\] + +
+ +
+
+ +Consider a system with a stable loop transfer function \\(L(s)\\). Then the closed-loop system is stable if +\\[ \rho(L(j\w)) < 1 \quad \forall \w \\] + +
+ +
+
+ +Consider a system with a stable loop transfer function \\(L(s)\\). Then the closed-loop system is stable if +\\[ \norm{L(j\w)} < 1 \quad \forall \w\\] +Where \\(\norm{L}\\) denotes any matrix norm that satisfies the multiplicative property \\(\norm{AB} \leq \norm{A}\cdot\norm{B}\\) + +
+ +The Small gain theorem for SISO system says that the system is stable if \\(\abs{L(j\w)} < 1\\) at all frequencies \\(\w\\). This is clearly a **very conservative condition** as no phase information is taken into account. + +This may be understood as follows: the signals which "return" in the same direction after "one turn around the loop" are magnified by the eigenvalues \\(\lambda\_i\\) (and the directions are the eigenvectors \\(x\_i\\)): + +\\[ L x\_i = \lambda\_i x\_i \\] + +So if all the eigenvalues \\(\lambda\_i\\) are less than 1 in magnitude, all signals become smaller after each round, and the closed-loop system is stable. + + +### System Norms {#system-norms} + + +#### \\(\htwo\\) norm {#htwo--norm} + +
+
+ +Consider a strictly proper system \\(G(s)\\). The \\(\htwo\\) norm is: + +\begin{align\*} +\normtwo{G(s)} &\triangleq \sqrt{\frac{1}{2\pi} \int\_{-\infty}^{\infty} \text{tr}\left(G(j\w)^HG(j\w)\right) d\w} \\\\\\ + & = \sqrt{\frac{1}{2\pi} \int\_{-\infty}^{\infty} \sum\_i {\sigma\_i}^2(G(j\w)) d\w} +\end{align\*} + +
+ +The \\(\htwo\\) norm can have a stochastic interpretation where we measure the **expected root mean square value of the output in response to white noise excitation**. + + +#### \\(\hinf\\) norm {#hinf--norm} + +
+
+ +Consider a proper linear stable system \\(G(s)\\). The \\(\hinf\\) norm is the peak value of its maximum singular value: +\\[ \hnorm{G(s)} \triangleq \max\_{\w} \maxsv(G(j\w)) \\] + +
+ +The \\(\hinf\\) norm has several interpretations in the time and frequency domains: + +- it is the peak of the transfer function magnitude +- by introducing weights, it can be interpreted as the **magnitude of some closed-loop transfer function relative to an upper bound** +- it is the worst case steady-state gain for sinusoidal inputs at any frequency +- it is equal to the 2-norm in the time domain: + +\\[ \hnorm{G(s)} = \max\_{w(t) \neq 0} \frac{\normtwo{z(t)}}{\normtwo{w(t)}} = \max\_{\normtwo{w(t)} = 1} \normtwo{z(t)} \\] + +- it has an interpretation as an induced norm in terms of the expected values of stochastic signals + + +#### Difference Between the \\(\htwo\\) and \\(\hinf\\) norms {#difference-between-the--htwo--and--hinf--norms} + +Minimizing the \\(\hinf\\) norm corresponds to minimizing the peak of the largest singular value, whereas minimizing the \\(\htwo\\) norm corresponds to minimizing the sum of the squares of all the singular values over all frequencies. + +
+
+ +The \\(\hinf\\) norm is **convenient for representing unstructured model uncertainty** because it satisfies the multiplicative property \\(\hnorm{A(s)B(s)} \leq \hnorm{A(s)} \cdot \hnorm{B(s)}\\). +It follows that the \\(\hinf\\) norm is an **induced norm**. + +
+ +The \\(\htwo\\) norm on the other hand is not an induced norm and does not satisfy the multiplicative property. +This implies that we cannot, by evaluating the \\(\htwo\\) norm of the individual components, say anything about how their series interconnection will behave. + + +#### Hankel norm {#hankel-norm} + +The Hankel norm of a stable system \\(G(s)\\) is obtained when one applies an input \\(w(t)\\) up to \\(t=0\\) and measures the output \\(z(t)\\) for \\(t>0\\), and selects \\(w(t)\\) to maximize the ratio of the 2-norms: +\\[ \left\\|G(s)\right\\|\_H \triangleq \max\_{w(t)} \frac{\sqrt{\int\_{0}^{\infty} \normtwo{z(\tau)}^2 d\tau }}{\sqrt{\int\_{-\infty}^0 \normtwo{w(\tau)}^2 d\tau}} \\] +The Hankel norm is a kind of induced norm from past inputs to future outputs. + +It may be shown that the Hankel norm is equal to \\(\left\\|G(s)\right\\|\_H = \sqrt{\rho(PQ)}\\) where \\(\rho\\) is the spectral radius, \\(P\\) is the controllability Gramian and \\(Q\\) the observability Gramian. + + +## Limitations on Performance in SISO Systems {#limitations-on-performance-in-siso-systems} + + + + +### Input-Output Controllability {#input-output-controllability} + +
+
+ +The input-output controllability is the **ability to achieve acceptable control performance**; that is, to keep the outputs (\\(y\\)) within specified bounds from their references (\\(r\\)), in spite of unknown but bounded variations, such as disturbances (\\(d\\)) and plant changes, using available inputs (\\(u\\)) and available measurements (\\(y\_m\\)). + +
+ +A plant is controllable if there **exists** a controller that yields acceptable performance for all expected plant variations. Thus, **controllability is independent of the controller and is a property of the plant alone**. +It may be affected by changing the plant itself: + +- changing the mechanical design +- relocating sensors and actuators +- adding new equipment to dampen disturbances +- adding extra sensors or actuators +- changing the configuration of the lower layers of control already in place + +
+
+ +Input-output controllability analysis is applied to a plant to find out **what control performance can be expected**. + +It is also called **performance targeting**. + +
+ +If the system has been **scaled**, the requirement for acceptable performance is: +For any disturbance \\(\abs{d} \leq 1\\) and any reference \\(\abs{r} \leq R\\), the **performance requirement** is to keep the control error \\(\abs{e} \leq 1\\) using an input \\(\abs{u} \leq 1\\). + + +### Perfect Control and Plant Inversion {#perfect-control-and-plant-inversion} + +To obtain insight into the inherent limitations on performance, let's consider the **input needed to achieve perfect control**. +Let the plant model be: \\(y = G u + G\_d d\\) +Since we want perfect control, \\(y = r\\) and we have \\(u = G^{-1} r - G^{-1} G\_d d\\) that represents a perfect feedforward controller. + +For a feedback control, \\(u = K(r - y)\\), and we have \\(u = KS r - KSG\_d d\\) that we can rewrite \\(u = G^{-1}Tr - G^{-1}TG\_d d\\). + +We see that at frequency where feedback is effective (\\(T \approx I\\)), the input generated by feedback is the same as the perfect control input. That is, **high gain feedback generates an inverse of \\(G\\)**. + +Perfect control requires the controller to somehow generate an inverse of \\(G\\). Perfect control cannot be achieved if: + +- \\(G\\) contains RHP-zeros (since then \\(G^{-1}\\) is unstable) +- \\(G\\) contains time delay (since then \\(G^{-1}\\) contains non-causal prediction) +- \\(G\\) has more pole than zero (since then \\(G^{-1}\\) is unrealizable) + +The required input must not exceed maximum physically allowed value (\\(\abs{u} \leq 1\\)), therefore perfect control cannot be achieve if: + +- \\(\abs{G^{-1} G\_d}\\) is large (\\(\geq 1\\)) +- \\(\abs{G^{-1} R}\\) is large (\\(\geq 1\\)) + + +### Constrain of \\(S\\) and \\(T\\) {#constrain-of--s--and--t} + + +#### \\(S\\) Plus \\(T\\) is One {#s--plus--t--is-one} + +
+
+ +From the definitions \\(S = (I + L)^{-1}\\) and \\(T = L(I+L)^{-1}\\) we derive + +\begin{equation} + S + T = I +\end{equation} + +
+ +Ideally, we want \\(S\\) small to obtain small control error for commands and disturbances, and \\(T\\) small to avoid sensitivity to noise. These requirements are not simultaneously possible at any frequency. + +
+
+ +Suppose that the open-loop transfer function \\(L(s)\\) is rational and has at least two more poles than zeros. +Suppose also that \\(L(s)\\) has \\(N\_p\\) RHP-poles at locations \\(p\_i\\). +Then for closed-loop stability, the sensitivity function must satisfy the following **Bode Sensitivity Integral**: + +\begin{equation} + \int\_0^\infty \ln\abs{S(j\w)} d\w = \pi \sum\_{i=1}^{N\_p} \text{Re}(p\_i) +\end{equation} + +
+ +For a **stable plant**, we must have: + +\begin{equation} + \int\_0^\infty \ln\abs{S(j\w)} d\w = 0 +\end{equation} + +The area of sensitivity reduction (\\(\ln\abs{S}\\) negative) must equal the area of sensitivity increase (\\(\ln\abs{S}\\) positive): **the benefits and costs of feedback are balanced**. + +For an **unstable plant**, the presence of unstable poles usually increases the peak of \\(\abs{S}\\) as seen from the contribution \\(\pi \sum\_{i=1}^{N\_p} \text{Re}(p\_i)\\). This is the price to pay for stabilizing the system. + +From the first waterbed formula, we expect that an increase in the bandwidth must come at the expense of a large peak in \\(\abs{S}\\). +Although this is true in most practical cases, it is not strictly implied by the formula. +This is because the increase in area may happen over an infinite frequency range. + +
+
+ +Suppose that \\(L(s)\\) has a single real **RHP-zero** \\(z\\) or a complex conjugate pair of zeros \\(z=x\pm jy\\), and has \\(N\_p\\) RHP-poles \\(p\_i\\). +For closed-loop stability, the sensitivity function must satisfy +\\[ \int\_0^\infty \ln\abs{S(j\w)} w(z, \w) d\w = \pi \ln \prod\_{i=1}^{N\_p} \abs{\frac{p\_i + z}{\bar{p\_i}-z}} \\] + +where if the zero is real +\\[ w(z, \w) = \frac{2z}{z^2 + \w^2} \\] +and if the zero pair is complex +\\[ w(z, \w) = \frac{x}{x^2 + (y-\w)^2} + \frac{x}{x^2 + (y+\w)^2} \\] + +
+ +The second waterbed formula implies that the peak of \\(\abs{S}\\) is even higher for plants with RHP-zeros. + +The weight \\(w(z, \w)\\) effectively "cuts off" the contribution from \\(\ln\abs{S}\\) to the integral at frequencies \\(\w > z\\). +So we have approximately: +\\[ \int\_0^z \ln \abs{S(j\w)} d\w \approx 0 \\] + +This is similar to the Bode sensitivity integral, except that the trade-off is done over a limited frequency range. +Thus, a large peak for \\(\abs{S}\\) is unavoidable if we try to push down \\(\abs{S}\\) at low frequencies. + + +#### Interpolation Constraints {#interpolation-constraints} + +
+
+ +If \\(p\\) is a **RHP-pole** of the loop transfer function \\(L(s)\\) then + +\begin{equation} + T(p) = 1, \quad S(p) = 0 +\end{equation} + +If \\(z\\) is a **RHP-zero** of the loop transfer function \\(L(s)\\) then + +\begin{equation} + T(z) = 0, \quad S(z) = 1 +\end{equation} + +
+ + +#### Sensitivity Peaks {#sensitivity-peaks} + +
+
+ +Suppose \\(f(s)\\) is stable, then the maximum value of \\(\abs{f(s)}\\) for \\(s\\) in the RHP is attained on the region's boundary (somewhere along the \\(j\w\\)-axis): +\\[ \hnorm{f(j\w)} = \max\_{\omega} \abs{f(j\w)} \geq \abs{f(s\_0)} \quad \forall s\_0 \in \text{RHP} \\] + +
+ +We can derive the following bounds on the peaks of \\(S\\) and \\(T\\) from the maximum modulus principle: +\\[ \hnorm{S} \geq \max\_{j} \prod\_{i=1}^{N\_p} \frac{\abs{z\_j + \bar{p\_i}}}{\abs{z\_j - p\_i}} \quad \hnorm{T} \geq \max\_{i} \prod\_{j=1}^{N\_z} \frac{\abs{\bar{z\_j} + p\_i}}{\abs{z\_j - p\_i}} \\] + +This shows that **large peaks** for \\(\abs{S}\\) and \\(\abs{T}\\) are unavoidable if we have a **RHP-zero and RHP-pole located close to each other**. + + +### Limitation Imposed by Time Delays {#limitation-imposed-by-time-delays} + +Consider a plant \\(G(s)\\) that contains a time delay \\(e^{-\theta s}\\). Even the "ideal" controller cannot remove this delay and the "ideal" sensitivity function is \\(S = 1 - T = 1 - e^{-\theta s}\\). + +
+
+ +\\(S\\) crosses 1 at a frequency of about \\(1/\theta\\), so we expect to have an upper bound on \\(\w\_c\\): +\\[ \w\_c < 1/\theta \\] + +
+ + +### Limitation Imposed by RHP-Zeros {#limitation-imposed-by-rhp-zeros} + +RHP-zeros typically appear when we have **competing effects of slow and fast dynamics**. Their presence induces many limitations. + + +#### Inverse Response {#inverse-response} + +We can show that the output of a step change in the input of a stable plant with \\(n\_z\\) real RHP-zeros will cross zero \\(n\_z\\) times, that is, we have **inverse response**. + + +#### High Gain Instability {#high-gain-instability} + +It is well known that the closed-loop poles migrate from the open-loop poles to the open-loop zeros as the feedback gain increases. Thus **the presence of RHP-zeros implies high-gain instability**. + + +#### Bandwidth Limitation {#bandwidth-limitation} + +To derive bounds for the bandwidth, we select performance weight \\(w\_P(s)\\) and we then use the interpolation constraint \\(S(z) = 1\\). + +We require \\(\abs{S(j\w)} < 1/\abs{w\_P(j\w)} \quad \forall \w\\), so we must at least require that the weight satisfies \\(\abs{w\_P(z)} < 1\\). + + +##### Performance at low frequencies {#performance-at-low-frequencies} + +If we specify performance at low frequencies, we may use the following weight: +\\[ w\_P = \frac{s/M + \w\_B^\*}{s + \w\_B^\* A} \\] +Where \\(\w\_B^\*\\) is the minimum wanted bandwidth, \\(M\\) the maximum peak of \\(\abs{S}\\) and \\(A\\) the steady-state offset. + +If we consider a **real RHP-zero**: +\\[ \w\_B^\* < z \frac{1 - 1/M}{1 - A} \\] +For example, with \\(A=0\\) and \\(M=2\\), we must at least require \\(\w\_B^\* < 0.5z\\). + +If we consider an **imaginary RHP-zero**: +\\[ \w\_B^\* < \abs{z} \sqrt{1 - \frac{1}{M^2}} \\] +For example, with \\(M=2\\), we must at least require \\(\w\_B^\* < 0.86\abs{z}\\). + +
+
+ +The presence of RHP-zero imposes an **upper bound on the achievable bandwidth** when we want tight control at low frequencies + +
+ + +##### Performance at high frequencies {#performance-at-high-frequencies} + +We consider the case where we want **tight control at high frequencies**, by use of the performance weight: +\\[ w\_P = \frac{1}{M} + \frac{s}{\w\_B^\*} \\] + +If we consider a **real RHP-zero**: +\\[ \w\_B^\* > z \frac{1}{1-1/M} \\] +For example, with \\(M=2\\) the requirement is \\(\w\_B^\* > 2z\\), so we can only achieve tight control at frequencies beyond the frequency of the RHP-zero. + +
+
+ +The presence of RHP-zero imposes and **lower bound on the achievable bandwidth** when we want tight control at high frequencies + +
+ + +### Limitation Imposed by RHP-Poles {#limitation-imposed-by-rhp-poles} + +For unstable plants with a RHP-pole at \\(s = p\\), we **need** feedback for stabilization. + +
+
+ +In presence of a RHP-pole at \\(s=p\\): +\\[ \hnorm{KS} \geq \abs{G\_s(p)^{-1}} \\] +where \\(G\_s\\) is the "stable version" of \\(G\\) with its RHP-poles mirrored into the LHP. + +Since \\(u = -KS(G\_d d + n)\\) and because of the previous inequality, the presence of disturbances \\(d\\) and measurement noise \\(n\\) may require the input \\(u\\) to saturate. +When the inputs saturate, the system is practically open-loop and the **stabilization is not possible**. + +
+ +
+
+ +We need to react sufficiently fast. +For a real RHP-pole \\(p\\) we must require that the closed-loop bandwidth is larger than \\(2p\\). +The presence of **RHP-poles generally imposes a lower bound on the bandwidth**. + +
+ + +### Combined Unstable (RHP) Poles and Zeros {#combined-unstable--rhp--poles-and-zeros} + +A strictly proper plant with a single real RHP-zero \\(z\\) and a single real RHP-pole \\(p\\) can be stabilized by a stable proper controller if and only if \\(z>p\\). In words "the system may go unstable before we have time to react". + +In order to achieve acceptable performance and robustness, we must approximately require \\(z>4p\\). That is, we want to RHP-pole to be much lower than the RHP-zero. + +The presence of RHP-zeros (or time delays) make stabilization more difficult. + + +### Performance Requirements Imposed by Disturbances and Commands {#performance-requirements-imposed-by-disturbances-and-commands} + + +##### Disturbance rejection {#disturbance-rejection} + +Consider a single disturbance \\(d\\) and a constant reference \\(r=0\\). Without control, we have \\(e = G\_d d\\). +We conclude that no control is needed if \\(\abs{G\_d(j\w)} < 1\\) at all frequencies. In that case, the plant is said to be "**self-regulated**". + +If \\(\abs{G\_d(j\w)} > 1\\) at some frequency, then **we need control**. In case of feedback control, we have +\\[ e(s) = S(s)G\_d(s)d(s) \\] +The performance requirement \\(\abs{e(\w)} < 1\\) for any \\(\abs{d(\w)}\\) at any frequency is satisfied if and only if +\\[ \abs{S G\_d(j\w)} < 1 \quad \forall\w \quad \Leftrightarrow \quad \abs{S(j\w)} < 1/\abs{G\_d(j\w)} \quad \forall\w \\] + +
+
+ +If the plant has a RHP-zero at \\(s=z\\), then \\(S(z) = 1\\) and we have the following condition: +\\[ \abs{G\_d(z)} < 1 \\] + +
+ +
+
+ +We also have that +\\[ \w\_B > \w\_d \\] +where \\(\w\_d\\) is defined by \\(\abs{G\_d(j\w\_d)} = 1\\). + +
+ +The actual bandwidth requirement imposed by disturbances may be higher than \\(\w\_d\\) if \\(\abs{G\_d(j\w)}\\) drops with a slope steeper than \\(-1\\) just before the frequency \\(\w\_d\\). This is because we cannot let the slope of \\(\abs{L(j\w)}\\) around the crossover be much larger than \\(-1\\) due to stability margins. It is however possible to overcome this issue using local feedback loops in series. + + +##### Command tracking {#command-tracking} + +Assume than \\(d=0\\) and \\(r(t) = R\sin(\w t)\\). For acceptable control (\\(\abs{e} < 1\\)) we must have +\\[ \abs{S(j\w)R}<1 \quad \forall\w\leq\w\_r \\] +where \\(\w\_r\\) is the frequency up to which performance tracking is required. + + +### Limitation Imposed by Input Constraints {#limitation-imposed-by-input-constraints} + +
+
+ +To achieve acceptable control (\\(\abs{e}<1\\)) and avoid input saturation (\\(\abs{u}<1\\)), we must require: + +For **disturbance rejection**: +\\[ \abs{G} > \abs{G\_d} - 1 \text{ at frequencies where } \abs{G\_d} > 1 \\] + +For **command tracking**: +\\[ \abs{G} > \abs{R} - 1 \quad \forall \w \leq \w\_r \\] + +
+ + +### Limitation Imposed by Phase Lag {#limitation-imposed-by-phase-lag} + +Phase lag in the plant presents no fundamental limitation; however, it usually does in practical designs. + +
+
+ +Let us define \\(\w\_u\\) as the frequency where the phase lag of the plant \\(G\\) is \\(\SI{-180}{\degree}\\) + +\begin{equation} + \angle G(j\w\_u) \triangleq \SI{-180}{\degree} +\end{equation} + +
+ +With simple controllers such as a proportional controller or a PI-controller, the phase lag does pose a fundamental limitation on the achievable bandwidth because of stability bounds: +\\[ \w\_c < \w\_u \\] + +However, if the model is exactly known and there are no RHP-zeros or time delays, one may extend \\(\w\_c\\) to infinite frequency by placing zeros in the controller at the plant poles. + + +### Limitation Imposed by Uncertainty {#limitation-imposed-by-uncertainty} + + +##### Uncertainty with feedforward control {#uncertainty-with-feedforward-control} + +Perfect control is obtained using a controller which generates the control input +\\[ u = G^{-1} r - G^{-1} G\_d d \\] +When we apply this perfect controller to the actual plant \\(y' = G' u + G\_d' d\\), we find +\\[ e' = y' - r = \underbrace{\left( \frac{G'}{G} - 1 \right)}\_{\text{rel. error in }G} r - \underbrace{\left( \frac{G'/G\_d'}{G/G\_d} - 1 \right)}\_{\text{rel. error in } G/G\_d} G\_d' d \\] +For feedforward control, **the model error propagates directly to the control error**. + +If we want acceptable control (\\(\abs{e'}<1\\)), we must require that the model error in \\(G/G\_d\\) is less than \\(1/\abs{G\_d'}\\). This is very difficult to satisfy at frequencies where \\(\abs{G\_d'}\\) is much larger than 1. + +The presence of uncertainty then requires us to use feedback control rather than just feedforward control. + + +##### Uncertainty with feedback control {#uncertainty-with-feedback-control} + +With feedback control, the closed-loop response is \\(e = y - r = S G\_d d - S r\\). +With model error, we get \\(y' - r = S'(G\_d'd - r)\\) where \\(S' = (I + G'K)^{-1}\\). +\\(S'\\) can be rewritten as \\(S' = S \frac{1}{1+ET}\\) with \\(E = \frac{G'-G}{G}\\) the relative error for \\(G\\). + +We see that the **control error in only weakly affected by model error at frequencies where feedback is effective** (\\(T \approx 1\\)). + +
+
+ +Uncertainty in the crossover frequency region can result in poor performance and even instability: + +- Uncertainty which keeps \\(\abs{G(j\w\_u)}\\) approximately constant will not change the gain margin. +- Uncertainty which increases \\(\abs{G(j\w\_u)}\\) may yield instability. + +
+ + +### Summary: Controllability Analysis with Feedback Control {#summary-controllability-analysis-with-feedback-control} + + + +{{< figure src="/ox-hugo/skogestad07_classical_feedback_meas.png" caption="Figure 10: Feedback control system" >}} + +Consider the control system in Fig. [fig:classical_feedback_meas](#fig:classical_feedback_meas). +Here \\(G\_m(s)\\) denotes the measurement transfer function and we assume \\(G\_m(0) = 1\\) (perfect steady-state measurement). + +
+
+ +1. **Speed of response to reject disturbances**. We approximately require \\(\w\_c > \w\_d\\). With feedback control we require \\(\abs{S(j\w)} \leq \abs{1/G\_d(j\w)} \quad \forall\w\\). +2. **Speed of response to track reference changes**. We require \\(\abs{S(j\w)} \leq 1/R\\) up to the frequency \\(\w\_r\\) where tracking is required. +3. **Input constraints arising from disturbances**. For acceptable control we require \\(\abs{G(j\w)} > \abs{G\_d(j\w)} - 1\\) at frequencies where \\(\abs{G\_d(j\w)} > 1\\). +4. **Input constraints arising from setpoints**. We require \\(\abs{G(j\w)} > R - 1\\) up to the frequency \\(\w\_r\\) where tracking is required. +5. **Time delay \\(\theta\\) in \\(G(s)G\_m(s)\\)**. We approximately require \\(\w\_c < 1/\theta\\). +6. **Tight control at low frequencies with a RHP-zero \\(z\\) in \\(G(s)G\_m(s)\\)**. For a real RHP-zero we require \\(\w\_c < z/2\\) and for an imaginary RHP-zero we approximately require \\(\w\_c < \abs{z}\\). +7. **Phase lag constraint**. We require in most practical cases \\(\w\_c < \w\_u\\). Here the ultimate frequency \\(\w\_u\\) is where \\(\angle GG\_m(j\w\_u) = \SI{-180}{\degree}\\). Since time delays and RHP-zeros also contribute to the phase lag, it is possible to combine the corresponding rules in the single rule \\(\w\_c < \w\_u\\). +8. **Real open-loop unstable pole in \\(G(s)\\) at \\(s=p\\)**. We need high feedback gains to stabilize the system and we approximately require \\(\w\_c > 2p\\). + +
+ +In summary: + +- rules 1, 2 and 8 tell us that we need high feedback gain in order to reject disturbances, to track setpoints and to stabilize the plant. +- rules 5, 6 and 7 tell us we must use low feedback gains in the frequency range where there are RHP-zeros or delays or where the plant has a lot of phase lag. + +Sometimes, the disturbances are so large that we hit input saturation or the required bandwidth is not achievable. To avoid the latter problem, we must at least require that the effect of the disturbance is less than \\(1\\) at frequencies beyond the bandwidth: +\\[ \abs{G\_d(j\w)} < 1 \quad \forall \w \geq \w\_c \\] + + + +{{< figure src="/ox-hugo/skogestad07_margin_requirements.png" caption="Figure 11: Illustration of controllability requirements" >}} + + +##### Controllability analysis with feedforward control {#controllability-analysis-with-feedforward-control} + +We find that essentially the same conclusions apply to feedforward control when relevant. + +A major difference is that a delay in \\(G\_d(s)\\) is an advantage for feedforward control ("it gives the feedforward controller more time to make the right action"). + + +### Conclusion {#conclusion} + +The controllability analysis is summarized in terms of **eight controllability rules**. +These rules are **necessary conditions to achieve acceptable control performance**. +They are not sufficient since among other things they only consider one effect at a time. +The rules may be used to **determine whether or not a given plant is controllable**. + + +## Limitations on Performance in MIMO Systems {#limitations-on-performance-in-mimo-systems} + + + + +### Introduction {#introduction} + +In a MIMO system, disturbances, the plant, RHP zeros, RHP poles, delays and disturbances have each **directions** associated with them. + +We quantify the directionality of the various effects in \\(G\\) and \\(G\_d\\) by their output directions: + +- \\(y\_z\\): output dir. 
of RHP-zero, \\(G(z) u\_z = 0 \cdot y\_z\\) +- \\(y\_p\\): output dir. of RHP-pole, \\(G(p\_i) u\_p = \infty \cdot y\_p\\) +- \\(y\_d\\): output dir. of disturbance, \\(y\_d(s) = \frac{1}{\normtwo{g\_d(s)}} g\_d(s)\\) +- \\(u\_i\\): i'th output dir. (singular vector) of the plant, \\(G(s) v\_i(s) = \sigma\_i(s) u\_i(s)\\) + +We may also consider input directions, however we are primarily concerned with the performance at the output of the plant. + +The **angle between various output directions** is quantified using their inner products. + +For example, the output angle between a pole and a zero is \\(\phi = \cos^{-1} \abs{y\_z^H y\_p}\\), and: + +- if \\(\phi = \SI{90}{\degree}\\), then the pole and zero are in completely different directions and there is no interaction (they may be considered separately) +- if \\(\phi = \SI{0}{\degree}\\), then they interact as in a SISO system + + +### Constraints on \\(S\\) and \\(T\\) {#constraints-on--s--and--t} + + +#### \\(S\\) plus \\(T\\) is the Identity Matrix {#s--plus--t--is-the-identity-matrix} + +From the identity \\(S + T = I\\), we get: + +\begin{subequations} + \begin{align} + |1 - \maxsv(S)| \leq \maxsv(T) \leq 1 + \maxsv(S)\\\\\\ + |1 - \maxsv(T)| \leq \maxsv(S) \leq 1 + \maxsv(T) + \end{align} +\end{subequations} + +This shows that we cannot have \\(S\\) and \\(T\\) small simultaneously and that \\(\maxsv(S)\\) is large if and only if \\(\maxsv(T)\\) is large. + + +#### Sensitivity Intregrals {#sensitivity-intregrals} + +The waterbed effect can be generalized for MIMO systems: + +\begin{align\*} +\int\_0^{\infty} \ln{|\det{S(j\w)}|} d\w &= \sum\_j \int\_0^\infty \ln{\sigma\_j(S(j\w))} d\w \\\\\\ + &= \pi \cdot \sum\_{i=1}^{N\_p} \text{Re}(p\_i) +\end{align\*} + + +#### Interpolation Constraints {#interpolation-constraints} + +The basis of many of the results in this chapter are the "**interpolation constraints**". + +
+
+ +If \\(G(s)\\) has a RHP-zero at \\(z\\) with output direction \\(y\_z\\), \\(T(s)\\) must have a RHP-zero at \\(z\\), i.e., \\(T(z)\\) has a zero gain in the direction of output direction \\(y\_z\\) of the zero, and we get +\\[ y\_z^H T(z) = 0 ; \quad y\_z^H S(z) = y\_z^H \\] + +
+ +
+
+ +If \\(G(s)\\) has a RHP-pole at \\(p\\) with output direction \\(y\_p\\), \\(S(s)\\) must have a RHP-zero at \\(p\\), i.e. \\(S(p)\\) has a zero gain in the input direction of the output direction \\(y\_p\\) of the RHP-pole, and we get +\\[ S(p) y\_p = 0 ; \quad T(p) y\_p = y\_p \\] + +
+ + +#### Sensitivity Peaks {#sensitivity-peaks} + +Consider a plant \\(G(s)\\) with RHP-poles \\(p\_i\\) and RHP-zeros \\(z\_j\\). +The factorization of \\(G(s)\\) in terms of **Blaschke products** is: +\\[ \tcmbox{G(s) = B\_p^{-1} G\_s(s), \quad G(s) = B\_z(s) G\_m(s)} \\] +where \\(G\_s\\) is the stable and \\(G\_m\\) the minimum-phase version of \\(G\\). +\\(B\_p\\) and \\(B\_z\\) are stable all-pass transfer matrices (all singular values are 1 for \\(s=j\w\\)) containing the RHP-poles and RHP-zeros respectively.
+
+
+##### MIMO sensitivity peaks {#mimo-sensitivity-peaks}
+
+Suppose that \\(G(s)\\) has \\(N\_z\\) RHP-zeros \\(z\_j\\) with output directions \\(y\_{zj}\\), and \\(N\_p\\) RHP-poles \\(p\_i\\) with output directions \\(y\_{pi}\\).
+We define the all-pass transfer matrices from the Blaschke factorization and compute the real constants:
+\\[ c\_{1j} = \normtwo{y\_{zj}^H B\_p(z\_j)} \geq 1; \quad c\_{2i} = \normtwo{B\_z^{-1}(p\_i) y\_{pi}} \geq 1 \\]
+
+Let \\(w\_P(s)\\) be a stable weight. Then, for closed-loop stability the weighted sensitivity function must satisfy for each RHP-zero \\(z\_j\\)
+\\[ \hnorm{w\_P S} \ge c\_{1j} \abs{w\_P(z\_j)} \\]
+
+Let \\(w\_T(s)\\) be a stable weight. Then, for closed-loop stability the weighted complementary sensitivity function must satisfy for each RHP-pole \\(p\_i\\)
+\\[ \hnorm{w\_T T} \ge c\_{2i} \abs{w\_T(p\_i)} \\]
+
+
+
+
+By selecting \\(w\_P(s) = 1\\) and \\(w\_T(s) = 1\\), we get
+\\[ \hnorm{S} \ge \max\_{\text{zeros } z\_j} c\_{1j}; \quad \hnorm{T} \ge \max\_{\text{poles } p\_i} c\_{2i} \\]
+
+
+ + +### Functional Controllability {#functional-controllability} + +
+
+
+An m-input l-output system \\(G(s)\\) is **functionally controllable** if the normal rank of \\(G(s)\\), denoted \\(r\\), is equal to the number of outputs (\\(r = l\\)), that is, if \\(G(s)\\) has full row rank.
+A system is functionally uncontrollable if \\(r < l\\).
+
+A square MIMO system is uncontrollable if and only if \\(\det{G(s)} = 0,\ \forall s\\).
+
+A plant is functionally uncontrollable if and only if \\(\sigma\_l(G(j\omega)) = 0,\ \forall\w\\).
+\\(\sigma\_l(G(j\w))\\) is then a **measure of how close a plant is to being functionally uncontrollable**.
+
+
+
+
+If the plant is not functionally controllable (\\(r < l\\)), then there are \\(l - r\\) output directions, denoted \\(y\_0\\), which cannot be affected.
+
+By analyzing the uncontrollable output directions, an engineer can decide on whether it is acceptable to keep certain output combinations uncontrolled, or if additional actuators are needed.
+
+
+### Limitation Imposed by Time Delays {#limitation-imposed-by-time-delays}
+
+Time delays pose limitations also in MIMO systems. Let \\(\theta\_{ij}\\) denote the time delay in the \\(ij\\)'th element of \\(G(s)\\). Then a **lower bound on the time delay for output** \\(i\\) is given by the smallest delay in row \\(i\\) of \\(G(s)\\), that is
+\\[ \theta\_i^{\min} = \min\_j \theta\_{ij} \\]
+
+For MIMO systems, we have the surprising result that an increased time delay may sometimes improve the achievable performance. The time delay may indeed increase the decoupling between the outputs.
+
+
+### Limitations Imposed by RHP-Zeros {#limitations-imposed-by-rhp-zeros}
+
+The limitations imposed by RHP-zeros on MIMO systems are similar to those for SISO systems, although they only apply in particular directions.
+
+The limitations of a RHP-zero located at \\(z\\) may be derived from the bound:
+\\[ \hnorm{w\_P S(s)} = \max\_{\w} \abs{w\_P(j\w)} \maxsv(S(j\w)) \ge \abs{w\_P(z)} \\]
+
+All the results derived for SISO systems generalize if we consider the "worst" direction corresponding to the maximum singular value \\(\maxsv(S)\\).
+For instance, if we choose \\(w\_P(s)\\) to require tight control at low frequencies, the bandwidth must satisfy \\(w\_B^\* < z/2\\).
+ +In MIMO systems, one can often **move the deteriorating effect of a RHP-zero to a given output** which may be less important to control well. +This is possible because, although the interpolation constraint \\(y\_z^H T(z) = 0\\) imposes a certain relationship between the elements within each column of \\(T(s)\\), the columns of \\(T(s)\\) may still be selected independently.
+
+Requiring a decoupled response from \\(r\\) to \\(y\\) generally leads to the introduction of additional RHP-zeros in \\(T(s)\\) which are not present in \\(G(s)\\).
+Moving the effect of the RHP-zero to a particular output generally adds some interaction. Also, moving the RHP-zero in a direction where \\(y\_z\\) is small usually introduces more interaction than in a direction where \\(y\_z\\) is large.
+
+For example, if we have a RHP-zero with \\(y\_z = [0.03,\ -0.04,\ 0.9,\ 0.43]^T\\), then one may in theory move the bad effect of the RHP-zero to any of the outputs. However, in practice, it will be difficult to avoid the effect of the RHP-zero on output 3, because the zero direction is mainly in that output. Trying to move it somewhere else will give large interactions and poor performance.
+
+
+### Limitation Imposed by Unstable (RHP) Poles {#limitation-imposed-by-unstable--rhp--poles}
+
+For unstable plants, feedback is needed for stabilization. More precisely, the presence of an unstable pole \\(p\\) requires for internal stability \\(T(p) y\_p = y\_p\\) where \\(y\_p\\) is the output pole direction.
+
+
+
+ +The transfer function \\(KS\\) from plant output to plant inputs must satisfy for any RHP-pole \\(p\\) +\\[ \hnorm{KS} \ge \normtwo{u\_p^H G\_s(p)^{-1}} \\] +where \\(u\_p\\) is the input pole direction, and \\(G\_s\\) is the "stable version" of \\(G\\) with its RHP-poles mirrored in the LHP. + +
+ +
+
+ +From the bound \\(\hnorm{w\_T(s) T(s)} \ge \abs{w\_T(p)}\\), we find that a RHP-pole \\(p\\) imposes restrictions on \\(\maxsv(T)\\) which are identical to those derived on \\(\abs{T}\\) for SISO systems. +Thus, we need to react sufficiently fast and we must require that \\(\maxsv(T(j\w))\\) is about 1 or larger up to the frequency \\(2 \abs{p}\\). + +
+
+
+### RHP-poles Combined with RHP-Zeros {#rhp-poles-combined-with-rhp-zeros}
+
+For a MIMO plant with single RHP-zero \\(z\\) and single RHP-pole \\(p\\), we derive
+
+\\[ \hnorm{S} \ge c \quad \hnorm{T} \ge c \\]
+\\[ \text{with } c = \sqrt{\sin^2 \phi + \frac{\abs{z + p}^2}{\abs{z-p}^2} \cos^2 \phi} \\]
+where \\(\phi = \cos^{-1} \abs{y\_z^H y\_p}\\) is the angle between the RHP-zero and the RHP-pole.
+
+Thus the angle between the RHP-zero and the RHP-pole is of great importance; we usually want \\(\abs{y\_z^H y\_p}\\) close to zero so that they don't interact with each other.
+
+
+### Limitations Imposed by Disturbances {#limitations-imposed-by-disturbances}
+
+For SISO systems, we found that large and "fast" disturbances require tight control and a large bandwidth.
+The same results apply for MIMO systems, but again the issue of **directions** is important.
+
+
+
+ +Consider a scalar disturbance \\(d\\) and let the vector \\(g\_d\\) represents its effect on the outputs (\\(y = g\_d d\\)). +The disturbance direction is defined as + +\begin{equation} + y\_d = \frac{1}{\normtwo{g\_d}} g\_d +\end{equation} + +For a plant with multiple disturbances, \\(g\_d\\) is a column of the matrix \\(G\_d\\). + +
+ +
+
+ +\begin{equation} + \gamma\_d (G) = \maxsv(G) \maxsv(G^\dagger y\_d) +\end{equation} + +where \\(G^\dagger\\) is the pseudo inverse of \\(G\\) + +
+ +The disturbance condition number provides a **measure of how a disturbance is aligned with the plant**. It may vary between 1 (for \\(y\_d = \bar{u}\\)) if the disturbance is in the "good" direction, and the condition number \\(\gamma(G) = \maxsv(G) \maxsv(G^\dagger)\\) (for \\(y\_d = \ubar{u}\\)) if it is in the "bad" direction.
+
+Let us assume \\(r=0\\) and that the system has been scaled. With feedback control \\(e = S g\_d d\\) and the performance objective is
+\\[ \normtwo{S g\_d} = \maxsv(S g\_d) < 1 \ \forall\w \quad \Leftrightarrow \quad \hnorm{S g\_d} < 1 \\]
+
+We derive bounds in terms of the singular values of \\(S\\):
+\\[ \minsv(S) \normtwo{g\_d} \le \normtwo{S g\_d} \le \maxsv(S) \normtwo{g\_d} \\]
+
+
+
+ +For acceptable performance **we must at least require that** +\\[ \maxsv(I+L) > \normtwo{g\_d} \\] + +And **we may require that** +\\[ \minsv(I+L) > \normtwo{g\_d} \\] + +
+ +If \\(G(s)\\) has a **RHP-zero** at \\(s = z\\), then the **performance may be poor if the disturbance is aligned with the output direction of this zero**. +To satisfy \\(\hnorm{S g\_d} < 1\\), we must require +\\[ \abs{y\_z^H g\_d(z)} < 1 \\] +where \\(y\_z\\) is the direction of the RHP-zero. + + +### Limitations Imposed by Input Constraints {#limitations-imposed-by-input-constraints} + + +#### Inputs for Perfect Control {#inputs-for-perfect-control} + +We here consider the question: can the disturbances be rejected perfectly while maintaining \\(\\|u\\|<1\\)?
+
+For a square plant, the input needed for perfect disturbance rejection is \\(u = -G^{-1} G\_d d\\).
+
+For a single disturbance, as the worst-case disturbance is \\(\abs{d(\w)} = 1\\), we get that input saturation is avoided (\\(\\|u\\|\_{\text{max}} \le 1\\)) if all elements in the vector \\(G^{-1} g\_d\\) are less than 1 in magnitude:
+\\[ \\|G^{-1} g\_d\\|\_{\text{max}} < 1, \ \forall\w \\]
+
+It is first recommended to **consider one disturbance at a time** by plotting as a function of frequency the individual elements of \\(G^{-1} G\_d\\). This will yield more information about which particular input is most likely to saturate and which disturbance is the most problematic.
+
+
+#### Inputs for Acceptable Control {#inputs-for-acceptable-control}
+
+We here consider the question: is it possible to achieve \\(\\|e\\|<1\\) while using inputs with \\(\\|u\\| \le 1\\)?
+
+For SISO systems, we have to require \\(\abs{G} > \abs{g\_d} - 1\\) at frequencies where \\(\abs{g\_d} > 1\\).
+We would like to generalize this result to MIMO systems.
+
+
+
+ +Each singular value \\(\sigma\_i\\) of \\(G\\) must approximately satisfy: + +\begin{equation} + \sigma\_i(G) \ge \abs{u\_i^H g\_d} - 1 \text{ where } \abs{u\_i^H g\_d} > 1 +\end{equation} + +with \\(u\_i\\) the \\(i\\)'th output singular vector of \\(G\\). + +\\(u\_i^H g\_d\\) may be interpreted as the projection of \\(g\_d\\) onto the \\(i\\)'th output singular vector of the plant. + +
+ +Using the previous approximation, we can find out: + +- For which disturbances and at which frequencies input constraints may cause problems. This may give ideas on **which disturbances should be reduced**. +- In which direction \\(i\\) the plant gain is too small. By looking at the corresponding input singular vector \\(v\_i\\), one can determine **which actuators should be redesigned**. By looking at the corresponding output singular vector \\(u\_i\\), one can determine on which outputs we may have to reduce our performance requirements. + +For combined disturbances, one requires the \\(i\\)'th row sum of \\(U^H G\_d\\) to be less than \\(\sigma\_i(G)\\). However, we usually derive more insight by considering one disturbance at a time. + + +#### Unstable Plant and Input Constraints {#unstable-plant-and-input-constraints} + +Active use of inputs are needed to stabilize an unstable plant. +We must require \\(\hnorm{KS} \ge \normtwo{u\_p^H G\_s(p)^{-1}}\\). +If the required inputs exceed the constraints, then stabilization is most likely not possible. + + +### Limitation Imposed by Uncertainty {#limitation-imposed-by-uncertainty} + +The presence of **uncertainty requires the use of feedback** rather than simply feedforward control to get acceptable performance. +Sensitivity reduction with respect to uncertainty is achieved with high-gain feedback, but for any real system, we have a crossover frequency range where the loop gain has to drop below 1. The presence of uncertainty in this frequency range may result in poor performance or even instability. + +The issues are the same for SISO and MIMO systems, however, with MIMO systems there is an additional problem in that there is also **uncertainty associated with the plant directionality**. + + +#### Input and Output Uncertainty {#input-and-output-uncertainty} + +In practice, the difference between the true perturbed plant \\(G^\prime\\) and the plant model \\(G\\) is caused by a number of different sources. 
+We here focus on input and output uncertainty. +In multiplicative form, the input and output uncertainties are given by (see Fig. [fig:input_output_uncertainty](#fig:input_output_uncertainty)): +\\[ G^\prime = (I + E\_O) G (I + E\_I) \\] + + + +{{< figure src="/ox-hugo/skogestad07_input_output_uncertainty.png" caption="Figure 12: Plant with multiplicative input and output uncertainty" >}} + +Input and output uncertainty may seem similar, but their **implications for control may be very different**.
+
+If all the elements of \\(E\_O\\) and \\(E\_I\\) are non-zero, then we have **full block (unstructured) uncertainty**.
+
+In many cases, the source of uncertainty is in the individual input or output channels, and we have that \\(E\_I\\) and \\(E\_O\\) are **diagonal matrices**. For example \\(E\_I = \text{diag}\\{\epsilon\_1, \epsilon\_2, \dots\\}\\) where \\(\epsilon\_i\\) is the **relative uncertainty in input channel** \\(i\\).
+
+Diagonal input uncertainty is **always** present in real systems and the magnitude of \\(\epsilon\_i\\) is typically \\(0.1\\) or larger.
+
+
+#### Effect of Uncertainty on Feedforward Control {#effect-of-uncertainty-on-feedforward-control}
+
+Consider a feedforward controller \\(u = K\_r r\\) for the case with no disturbance (\\(d = 0\\)). We assume that \\(G\\) is invertible and we select \\(K\_r = G^{-1}\\) to achieve perfect control (\\(e = 0\\)).
+However, for the actual plant \\(G^\prime\\) (with uncertainty), the actual control error \\(e^\prime = y^\prime - r = G^\prime G^{-1} r - r\\) is not zero and we get:
+
+- For output uncertainty: \\(e^\prime = E\_O r\\)
+- For input uncertainty: \\(e^\prime = G E\_I G^{-1} r\\)
+
+For output uncertainty, we have an identical result as for SISO systems: the worst case relative control error \\(\normtwo{e^\prime}/\normtwo{r}\\) is equal to the magnitude of the relative output uncertainty \\(\maxsv(E\_O)\\).
+However, for input uncertainty, the sensitivity may be much larger because the elements in the matrix \\(G E\_I G^{-1}\\) can be much larger than the elements in \\(E\_I\\).
+
+
+
+ +For diagonal input uncertainty, the elements of \\(G E\_I G^{-1}\\) are directly related to the RGA: +\\[ \left[ G E\_I G^{-1} \right]\_{ii} = \sum\_{j=1}^n \lambda\_{ij}(G) \epsilon\_j \\] + +
+
+Since diagonal input uncertainty is always present, we can conclude that **if the plant has large RGA elements within the frequency range where effective control is desired, then it is not possible to achieve good reference tracking with feedforward control** because of strong sensitivity to diagonal input uncertainty. The reverse statement is not true.
+
+
+#### Uncertainty and the Benefits of Feedback {#uncertainty-and-the-benefits-of-feedback}
+
+To illustrate the benefits of feedback control in reducing the sensitivity to uncertainty, we consider the effect of output uncertainty on reference tracking both for feedforward and feedback.
+ +**Feedforward** Let the nominal transfer function with feedforward control be \\(y = T\_r r\\) where \\(T\_r = G K\_r\\) and \\(K\_r = G^{-1}\\). +With model error \\(T\_r^\prime = G^\prime K\_r\\) and the change in response is \\(y^\prime - y = (T\_r^\prime - T\_r) r = (G^\prime - G)G^{-1} T\_r r = E\_O T\_r r\\). +Thus, the control error caused by the uncertainty is equal to the relative output uncertainty.
+ +**Feedback control** The output is \\(y = T r\\). +The change in response is \\(y^\prime - y = (T^\prime - T)r = S^\prime E\_O T r = S^\prime E\_O y\\). +With feedback control, **the effect of the uncertainty is reduced** by a factor \\(S^\prime\\) compared to that with feedforward control. + + +#### Uncertainty and the Sensitivity Peak {#uncertainty-and-the-sensitivity-peak} + +Consider a controller \\(K(s) = l(s)G^{-1}(s)\\) which results in a nominally decoupled response with sensitivity \\(S = s \cdot I\\) and complementary sensitivity \\(T = t \cdot I\\) where \\(t(s) = 1 - s(s)\\). +Suppose the plant has diagonal input uncertainty of relative magnitude \\(\abs{w\_I(j\w)}\\) in each input channel. +Then there exists a combination of input uncertainties such that at each frequency: +\\[ \maxsv(S^\prime) \ge \maxsv(S) \left( 1 + \frac{\abs{w\_I t}}{1+\abs{w\_I t}} \\|\Lambda(G)\\|\_{i\infty} \right) \\] +where \\(\\| \Lambda(G) \\|\_{i\infty}\\) is the maximum row sum of the RGA and \\(\maxsv(S) = \abs{s}\\). + +We can see that with an inverse based controller, the worst case sensitivity will be much larger than the nominal sensitivity at frequencies where the plant has large RGA elements. + +
+
+ +These statements apply to the frequency range around crossover. +By "small", we mean smaller than 2 and by "large" we mean larger than 10. + +- Condition number \\(\gamma(G)\\) or \\(\gamma(K)\\) small: robust performance to both diagonal and full-block input uncertainty +- Minimized condition number \\(\gamma\_I^\* (G)\\) or \\(\gamma\_O^\*(K)\\) small: robust performance to diagonal input uncertainty +- \\(\text{RGA}(G)\\) has large elements: inverse based controller is not robust to diagonal input uncertainty. + Since diagonal input uncertainty is unavoidable in practice, the rule is never to use a decoupling controller for a plant with large RGA-elements. + **Plant with large RGA elements are fundamentally difficult to control**. + +
+ + +#### Element-by-element Uncertainty {#element-by-element-uncertainty} + +Consider any complex matrix \\(G\\) and let \\(\lambda\_{ij}\\) denote the \\(ij\\)'th element in the RGA-matrix of \\(G\\). + +
+
+ +The matrix \\(G\\) becomes singular if we make a relative change \\(-1/\lambda\_{ij}\\) in its \\(ij\\)'th elements, that is, if a single element in \\(G\\) is perturbed from \\(g\_{ij}\\) to \\(g\_{pij} = g\_{ij}(1-\frac{1}{\lambda\_{ij}})\\) + +
+ +Thus, the RGA-matrix is a **direct measure of sensitivity to element-by-element uncertainty** and matrices with large RGA-values become singular for small relative errors in the elements. + +The above result has important implications: + +- **Identification**. Models of multivariable plants \\(G(s)\\) are often obtained by identifying one element at a time, for example using step responses. This simple analysis will most likely give meaningless results if there are large RGA-elements within the bandwidth where the model is intended to be used. +- **RHP-zeros**. Consider a plant with transfer function matrix \\(G(s)\\). If the relative uncertainty in an element at a given frequency is larger than \\(\abs{1/\lambda\_{ij}(j\w)}\\) then the plant may be singular at this frequency, implying that the uncertainty allows for a RHP-zero on the \\(j\w\text{-axis}\\). + + +### MIMO Input-Output Controllability {#mimo-input-output-controllability} + +The following procedure assumes that we have made a decision on the plant inputs and plant outputs, and we want to analyze the model \\(G\\) to find out **what control performance can be expected**. +It can also be used to assist in control structure design. + +A typical **MIMO controllability analysis** may proceed as follows: + +1. **Scale all variables** (inputs \\(u\\), outputs \\(y\\), disturbances \\(d\\), references \\(r\\)) to obtain a scaled model \\(y = G(s) u + G\_d(s) d\\), \\(r = R \tilde{r}\\) +2. **Obtain a minimal realization** +3. **Check functional controllability**. To be able to control the outputs independently, we first need at least as many inputs \\(u\\) as outputs \\(y\\). Second, we need the rank of \\(G(s)\\) to be equal to the number of outputs \\(l\\), i.e. the minimum singular value \\(G(j\w)\\), \\(\minsv(G) = \sigma\_l(G)\\), should be non-zero (except at possible \\(j\w\text{-axis}\\) zeros). 
If the plant is not functionally controllable, then compute the output direction where the plant has no gain to have insight into the source of the problem
+4. **Compute the poles**. For RHP poles, obtain their locations and associated directions. "Fast" RHP-poles far from the origin are bad
+5. **Compute the zeros**. For RHP zeros, obtain their locations and associated directions. Look for zeros pinned into certain outputs. "Small" RHP-zeros (close to the origin) are bad if tight performance is needed at low frequencies
+6. **Obtain the frequency response** \\(G(j\w)\\) and **compute the RGA matrix** \\(\Gamma = G \times (G^\dagger)^T\\). Plants with large RGA-elements at crossover frequencies are difficult to control and should be avoided
+7. **Compute the singular values** of \\(G(j\w)\\) and **plot them as a function of frequency**. Also consider the associated input and output singular vectors
+8. The **minimum singular value** \\(\minsv(G(j\w))\\) is a particularly useful **controllability measure**. It should generally be as large as possible at frequencies where control is needed. If \\(\minsv(G(j\w)) < 1\\) then we cannot at frequency \\(\w\\) make independent output changes of unit magnitude by using inputs of unit magnitude
+9. For **disturbances**, consider the elements of the matrix \\(G\_d\\). At frequencies where one or more elements is larger than 1, we need control. We get more information by considering one disturbance at a time (the columns \\(g\_d\\) of \\(G\_d\\)). We must require for each disturbance that \\(S\\) is less than \\(1/\normtwo{g\_d}\\) in the disturbance direction \\(y\_d\\), i.e. \\(\normtwo{S y\_d} \le 1/\normtwo{g\_d}\\). Thus, we must at least require \\(\minsv(S) \le 1/\normtwo{g\_d}\\) and we may have to require \\(\maxsv(S) \le 1/\normtwo{g\_d}\\)
+10. **Disturbances and input saturation**:
+ - **First step**. 
Consider the input magnitudes needed for perfect control by computing the elements in the matrix \\(G^\dagger G\_d\\). If all elements are less than 1 at all frequencies, then input saturation is not expected to be a problem. If some elements of \\(G^\dagger G\_d\\) are larger than 1, then perfect control cannot be achieve at this frequency, but "acceptable" control may be possible + - **Second step**. Consider the elements of \\(U^H G\_d\\) and make sure that the elements in the \\(i\\)'th row are smaller than \\(\sigma\_i(G) + 1\\) at all frequencies +11. **Are the requirements compatible?** Look at disturbances, RHP-poles, RHP-zeros and their associated locations and directions. For example, we must required for each disturbance and each RHP-zero that \\(\abs{y\_z^H g\_d(z)} \le 1\\). Similar relations exist for combined RHP-zero and RHP-pole. +12. **Uncertainty**. If the condition number \\(\gamma(G)\\) is small then we expect no particular problems with uncertainty. If the RGA-elements are large, we expect strong sensitivity to uncertainty. + + +##### Plant design changes {#plant-design-changes} + +If the plant is not input-output controllable, then it must be modified. +Some possible modifications are: + +- **Controlled outputs**. Identify the outputs which cannot be controlled satisfactory. Can the specifications for these be relaxed? +- **Manipulated inputs**. If input constraints are encountered, then consider replacing or moving actuators. If there are RHP-zeros which cause control problems, then the zeros may often be eliminated by adding another input. This may not be possible if the zero is pinned to a particular output +- **Extra measurements**. If the effect of disturbances or uncertainty is large, and the dynamics of the plant are such that acceptable control cannot be achieved, then consider adding "fast local loops" based on extra measurements which are located close to the inputs and disturbances +- **Disturbances**. 
If the effect of disturbances is too large, then see whether the disturbance itself may be reduced. This may involve adding extra equipment to dampen the disturbances. In other cases, this may involve improving or changing the control of another part of the system: we may have a disturbance which is actually the manipulated input for another part of the system +- **Plant dynamics and time delays**. In most cases, controllability is improved by making the plant dynamics faster and by reducing time delays. An exception to this is a strongly interactive plant, where an increased dynamic lag or time delay may be helpful if it somehow "delays" the effect of the interactions + + +### Conclusion {#conclusion} + +We have found that most of the insights into the performance limitation of SISO systems carry over to MIMO systems. +For RHP-zeros, RHP-poles and disturbances, the issue of directions usually makes the limitation **less severe** for MIMO than for SISO systems. +However, the situation is usually the opposite with model uncertainty because for MIMO systems, there is also uncertainty associated with plant directionality. + + +## Uncertainty and Robustness for SISO Systems {#uncertainty-and-robustness-for-siso-systems} + + + + +### Introduction to Robustness {#introduction-to-robustness} + +A control system is robust if it is insensitive to differences between the actual system and the model of the system which was used to design the controller. +The key idea in the \\(\hinf\\) robust control paradigm is to check whether the design specifications are satisfied even for the **"worst-case" uncertainty**. + +Our approach is then as follows: + +1. **Determine the uncertainty set**. Find a mathematical representation of the model uncertainty +2. **Check Robust Stability (RS)**. Determine whether the system remains stable for all plants in the uncertainty set +3. **Check Robust Performance (RP)**. 
If RS is satisfied, determine whether the performance specifications are met for all plants in the uncertainty set + +This approach may not always achieve optimal performance. In particular, if the worst case plant rarely occurs, other approaches, such as optimizing some average performance or using adaptive control may yield better performance.
+ +To account for model uncertainty, we will assume that the dynamic behavior of a plant is described not by a single linear time invariant model but by a **set \\(\Pi\\) of possible linear time invariant models**, sometimes denoted the "**uncertainty set**".
+ +We adopt the following notation: + +- \\(\Pi\\) - a set of possible perturbed plant models +- \\(G(s) \in \Pi\\) - nominal plant model +- \\(G\_p(s) \in \Pi\\) - particular perturbed plant models + +We will use a "**norm-bounded uncertainty description**" where the set \\(\Pi\\) is generated by allowing \\(\hinf\\) norm-bounded stable perturbations to the nominal plant \\(G(s)\\). +We let \\(E\\) denote a perturbation which is not normalized, and let \\(\Delta\\) denote a normalized perturbation with its \\(\hinf\\) norm less than 1. + + +### Representing Uncertainty {#representing-uncertainty} + +Uncertainty in the plant model may have **several origins**: + +1. There are always parameters in the linear model which are only known approximatively +2. Parameters in the model may vary due to **non-linearities** or changes in the operating conditions +3. Measurement devices have imperfections +4. At high frequencies, even the structure and the model order is unknown, and the uncertainty will always exceed \\(\SI{100}{\percent}\\) at some frequency +5. Even when a very detailed model is available, we may choose to work with a simpler nominal model and **represent the neglected dynamics as "uncertainty"** +6. The controller implemented may differ from the one obtained by solving the synthesis problem. + One may include uncertainty to allow for controller order reduction and implementation inaccuracies + +The various sources of model uncertainty may be grouped into two main classes: + +1. **Parametric uncertainty**. The structure of the model is known, but some parameters are uncertain +2. **Neglected and unmodelled dynamics uncertainty**. The model is in error because of missing dynamics, usually at high frequencies + +
+
+

Parametric uncertainty will be quantified by assuming that **each uncertain parameter is bounded within some region** \\([\alpha\_{\min}, \alpha\_{\text{max}}]\\). That is, we have parameter sets of the form

\begin{equation}
  \alpha\_p = \bar{\alpha}(1 + r\_\alpha \Delta); \quad r\_\alpha = \frac{\alpha\_{\text{max}} - \alpha\_{\min}}{\alpha\_{\text{max}} + \alpha\_{\min}}
\end{equation}

where \\(\bar{\alpha}\\) is the mean parameter value, \\(r\_\alpha\\) is the relative uncertainty in the parameter, and \\(\Delta\\) is any real scalar satisfying \\(\abs{\Delta} \le 1\\).

+ +Neglected and unmodelled dynamics uncertainty is somewhat less precise and thus more difficult to quantify, but it appears that frequency domain is particularly well suited for this class. +This leads to **complex perturbations** which we normalize such that \\(\hnorm{\Delta} \le 1\\).
+ +There is also a third class of uncertainty (which is a combination of the other two) called **Lumped uncertainty**. +Here the uncertainty description represents one or several sources of parametric and/or unmodelled dynamics uncertainty combined into a single lumped perturbation of a chosen structure. +The frequency domain is also well suited for describing lumped uncertainty. + +
+
+ +In most cases, we prefer to lump the uncertainty into a multiplicative uncertainty of the form +\\[ G\_p(s) = G(s)(1 + w\_I(s)\Delta\_I(s)); \quad \abs{\Delta\_I(j\w)} \le 1 \, \forall\w \\] +which may be represented by the diagram in Fig. [fig:input_uncertainty_set](#fig:input_uncertainty_set). + +
+ + + +{{< figure src="/ox-hugo/skogestad07_input_uncertainty_set.png" caption="Figure 13: Plant with multiplicative uncertainty" >}} + + +### Parametric Uncertainty {#parametric-uncertainty} + +Parametric uncertainty may also be represented in the \\(\hinf\\) framework if we restrict \\(\Delta\\) to be real. + +
+
+

\\[ G\_p(s) = k\_p G\_0(s); \quad k\_{\min} \le k\_p \le k\_{\text{max}} \\]
where \\(k\_p\\) is an uncertain gain and \\(G\_0(s)\\) is a transfer function with no uncertainty.
By writing \\(k\_p = \bar{k}(1 + r\_k \Delta)\\) where \\(r\_k\\) is the relative magnitude of the gain uncertainty and \\(\bar{k}\\) is the average gain, we may write
\\[ G\_p = \underbrace{\bar{k}G\_0(s)}\_{G(s)} (1 + r\_k \Delta), \quad \abs{\Delta} \le 1 \\]
where \\(\Delta\\) is a real scalar and \\(G(s)\\) is the nominal plant.

+ +
+
+ +\\[ G\_p(s) = \frac{1}{\tau\_p s + 1}G\_0(s); \quad \tau\_{\min} \le \tau\_p \le \tau\_{\text{max}} \\] +By writing \\(\tau\_p = \bar{\tau}(1 + r\_\tau \Delta)\\), with \\(\abs{\Delta} \le 1\\), the model set can be rewritten as +\\[ G\_p(s) = \frac{G\_0}{1+\bar{\tau} s + r\_\tau \bar{\tau} s \Delta} = \underbrace{\frac{G\_0}{1+\bar{\tau}s}}\_{G(s)} \frac{1}{1 + w\_{iI}(s) \Delta} \\] +with \\(\displaystyle w\_{iI}(s) = \frac{r\_\tau \bar{\tau} s}{1 + \bar{\tau} s}\\). + +
+

As shown in the two examples, one can represent parametric uncertainty in the \\(\hinf\\) framework.
However, **parametric uncertainty is often avoided** for the following reasons:

1. It usually requires a **large effort** to model parametric uncertainty
2. A parametric uncertainty model is somewhat deceiving in the sense that it provides a very detailed and accurate description, even though the underlying assumptions about the model and the parameters may be much less exact
3. The **exact model structure is required** and so unmodelled dynamics cannot be dealt with
4. Real perturbations are required, which are more difficult to deal with mathematically and numerically, especially when it comes to controller synthesis

Therefore, parametric uncertainty is often represented by **complex perturbations**. For example, we may simply replace the real perturbation, \\(-1 \le \Delta \le 1\\) by a complex perturbation with \\(\abs{\Delta(j\w)} \le 1\\).
This is of course conservative as it introduces possible plants that are not present in the original set. However, if there are several real perturbations, then the conservatism is often reduced by lumping these perturbations into a single complex perturbation.


### Representing Uncertainty in the Frequency Domain {#representing-uncertainty-in-the-frequency-domain}


#### Uncertain Regions {#uncertain-regions}

To illustrate how parametric uncertainty translates into frequency domain uncertainty, consider in Fig. [fig:uncertainty_region](#fig:uncertainty_region) the Nyquist plots generated by the following set of plants
\\[ G\_p(s) = \frac{k}{\tau s + 1} e^{-\theta s}, \quad 2 \le k, \theta, \tau \le 3 \\]

- **Step 1**. At each frequency, a region of complex numbers \\(G\_p(j\w)\\) is generated by varying the parameters.
  In general, these uncertain regions have complicated shapes and complex mathematical descriptions
- **Step 2**. 
We therefore approximate such complex regions as discs, resulting in a **complex additive uncertainty description** + + + +{{< figure src="/ox-hugo/skogestad07_uncertainty_region.png" caption="Figure 14: Uncertainty regions of the Nyquist plot at given frequencies" >}} + + +#### Representing Uncertainty Regions by Complex Perturbations {#representing-uncertainty-regions-by-complex-perturbations} + +
+
+

The disc-shaped regions may be generated by additive complex norm-bounded perturbations around a nominal plant \\(G\\)

\begin{equation}
  \begin{aligned}
    \Pi\_A: \ G\_p(s) &= G(s) + w\_A(s) \Delta\_A(s) \\\\\\
    & \text{with }\abs{\Delta\_A(j\w)} \le 1 \, \forall\w
  \end{aligned}
\end{equation}

At each frequency, all possible \\(\Delta(j\w)\\) "generates" a disc-shaped region with radius 1 centered at 0, so \\(G(j\w) + w\_A(j\w)\Delta\_A(j\w)\\) generates at each frequency a disc-shaped region of radius \\(\abs{w\_A(j\w)}\\) centered at \\(G(j\w)\\) as shown in Fig. [fig:uncertainty_disc_generated](#fig:uncertainty_disc_generated).

+ + + +{{< figure src="/ox-hugo/skogestad07_uncertainty_disc_generated.png" caption="Figure 15: Disc-shaped uncertainty regions generated by complex additive uncertainty" >}} + +
+
+ +The disc-shaped region may alternatively be represented by a **multiplicative uncertainty** + +\begin{equation} + \begin{aligned} + \Pi\_I: \ G\_p(s) &= G(s)(1 + w\_I(s)\Delta\_I(s)); \\\\\\ + & \text{with }\abs{\Delta\_I(j\w)} \le 1 \, \forall\w + \end{aligned} +\end{equation} + +
+ +And we see that for SISO systems, additive and multiplicative uncertainty are equivalent if at each frequency: +\\[ \abs{w\_I(j\w)} = \abs{w\_A(j\w)}/\abs{G(j\w)} \\] + +However, **multiplicative weights are often preferred because their numerical value is more informative**. At frequencies where \\(\abs{w\_I(j\w)} > 1\\) the uncertainty exceeds \\(\SI{100}{\percent}\\) and the Nyquist curve may pass through the origin. +Then, at these frequencies, we do not know the phase of the plant, and we allow for zeros crossing from the left to the right-half plane. **Tight control is then not possible** at frequencies where \\(\abs{w\_I(j\w)} \ge 1\\). + + +#### Obtaining the Weight for Complex Uncertainty {#obtaining-the-weight-for-complex-uncertainty} + +Consider a set \\(\Pi\\) of possible plants resulting, for example, from parametric uncertainty. We now want to describe this set of plants by a single complex perturbation \\(\Delta\_A\\) or \\(\Delta\_I\\). + +This complex disc-shaped uncertainty description may be generated as follows: + +1. Select a nominal \\(G(s)\\) +2. **Additive uncertainty**. + At each frequency, find the smallest radius \\(l\_A(\w)\\) which includes all the possible plants \\(\Pi\\) + \\[ l\_A(\w) = \max\_{G\_p\in\Pi} \abs{G\_p(j\w) - G(j\w)} \\] + If we want a rational transfer function weight, \\(w\_A(s)\\), then it must be chosen to cover the set, so + \\[ \abs{w\_A(j\w)} \ge l\_A(\w) \quad \forall\w \\] + Usually \\(w\_A(s)\\) is of low order to simplify the controller design. +3. **Multiplicative uncertainty**. + This is often the preferred uncertainty form, and we have + \\[ l\_I(\w) = \max\_{G\_p\in\Pi} \abs{\frac{G\_p(j\w) - G(j\w)}{G(j\w)}} \\] + and with a rational weight \\(\abs{w\_I(j\w)} \ge l\_I(\w), \, \forall\w\\) + +
+
+ +We want to represent the following set using multiplicative uncertainty with a rational weight \\(w\_I(s)\\) +\\[ \Pi: \quad G\_p(s) = \frac{k}{\tau s + 1} e^{-\theta s}, \quad 2 \le k, \theta, \tau \le 3 \\] +To simplify subsequent controller design, we select a delay-free nominal model +\\[ G(s) = \frac{\bar{k}}{\bar{\tau} s + 1} = \frac{2.5}{2.5 s + 1} \\] + +To obtain \\(l\_I(\w)\\), we consider three values (2, 2.5 and 3) for each of the three parameters (\\(k, \theta, \tau\\)). +The corresponding relative errors \\(\abs{\frac{G\_p-G}{G}}\\) are shown as functions of frequency for the \\(3^3 = 27\\) resulting \\(G\_p\\) (Fig. [fig:uncertainty_weight](#fig:uncertainty_weight)). +To derive \\(w\_I(s)\\), we then try to find a simple weight so that \\(\abs{w\_I(j\w)}\\) lies above all the dotted lines. + +
+ + + +{{< figure src="/ox-hugo/skogestad07_uncertainty_weight.png" caption="Figure 16: Relative error for 27 combinations of \\(k,\ \tau\\) and \\(\theta\\). Solid and dashed lines: two weights \\(\abs{w\_I}\\)" >}} + + +#### Choice of Nominal Model {#choice-of-nominal-model} + +With parametric uncertainty represented as complex perturbations, there are three main options for the choice of nominal model: + +1. **A simplified model**, for instance a low order, delay free model. + It usually yields the largest uncertainty region, but the model is simple and this facilitates controller design in later stages. +2. **A model of mean parameter values**, \\(G(s) = \bar{G}(s)\\). + It is probably the most straightforward choice. +3. **The central plant obtained from a Nyquist plot**. + It yields the smallest region, but in this case a significant effort may be required to obtain the nominal model which is usually not a rational transfer function. + +For SISO systems, we find that for plants with an uncertain time delay, it is simplest and sometimes best to use a delay-free nominal model, and to represent the nominal delay as additional uncertainty. + +If we use a parametric uncertainty description, based on multiple real perturbations, then we should always use the mean parameter values in the nominal model. + + +#### Neglected Dynamics Represented as Uncertainty {#neglected-dynamics-represented-as-uncertainty} + +We saw that one advantage of frequency domain uncertainty description is that one can choose to work with a simple nominal model, and **represent neglected dynamics as uncertainty**. + +Consider a set of plants +\\[ G\_p(s) = G\_0(s) f(s) \\] +where \\(G\_0(s)\\) is fixed. +We want to neglect the term \\(f(s) \in \Pi\_f\\), and represent \\(G\_p\\) by multiplicative uncertainty with a nominal model \\(G = G\_0\\). 
+ +The magnitude of the relative uncertainty caused by neglecting the dynamics in \\(f(s)\\) is +\\[ l\_I(\w) = \max\_{G\_p} \abs{\frac{G\_p - G}{G}} = \max\_{f(s) \in \Pi\_f} \abs{f(j\w) - 1} \\] + + +##### Neglected delay {#neglected-delay} + +Let \\(f(s) = e^{-\theta\_p s}\\), where \\(0 \le \theta\_p \le \theta\_{\text{max}}\\). We want to represent \\(G\_p(s) = G\_0(s)e^{-\theta\_p s}\\) by a delay-free plant \\(G\_0(s)\\) and multiplicative uncertainty. Let first consider the maximum delay, for which the relative error \\(\abs{1 - e^{-j \w \theta\_{\text{max}}}}\\) is shown as a function of frequency (Fig. [fig:neglected_time_delay](#fig:neglected_time_delay)). If we consider all \\(\theta \in [0, \theta\_{\text{max}}]\\) then: +\\[ l\_I(\w) = \begin{cases} \abs{1 - e^{-j\w\theta\_{\text{max}}}} & \w < \pi/\theta\_{\text{max}} \\ 2 & \w \ge \pi/\theta\_{\text{max}} \end{cases} \\] + + + +{{< figure src="/ox-hugo/skogestad07_neglected_time_delay.png" caption="Figure 17: Neglected time delay" >}} + + +##### Neglected lag {#neglected-lag} + +Let \\(f(s) = 1/(\tau\_p s + 1)\\), where \\(0 \le \tau\_p \le \tau\_{\text{max}}\\). In this case the resulting \\(l\_I(\w)\\) (Fig. [fig:neglected_first_order_lag](#fig:neglected_first_order_lag)) can be represented by a rational transfer function with \\(\abs{w\_I(j\w)} = l\_I(\w)\\) where +\\[ w\_I(s) = \frac{\tau\_{\text{max}} s}{\tau\_{\text{max}} s + 1} \\] + + + +{{< figure src="/ox-hugo/skogestad07_neglected_first_order_lag.png" caption="Figure 18: Neglected first-order lag uncertainty" >}} + + +##### Multiplicative weight for gain and delay uncertainty {#multiplicative-weight-for-gain-and-delay-uncertainty} + +Consider the following set of plants +\\[ G\_p = k\_p e^{-\theta\_p s} G\_0(s); \quad k\_p \in [k\_{\min}, k\_{\text{max}}], \ \theta\_p \in [\theta\_{\min}, \theta\_{\text{max}}] \\] +which we want to represent by multiplicative uncertainty and a delay-free nominal model \\(G(s) = \bar{k} G\_0(s)\\). 
+There is an exact expression, its first order approximation is +\\[ w\_I(s) = \frac{(1+\frac{r\_k}{2})\theta\_{\text{max}} s + r\_k}{\frac{\theta\_{\text{max}}}{2} s + 1} \\] +However, as shown in Fig. [fig:lag_delay_uncertainty](#fig:lag_delay_uncertainty), the weight \\(w\_I\\) is optimistic, especially around frequencies \\(1/\theta\_{\text{max}}\\). To make sure that \\(\abs{w\_I(j\w)} \le l\_I(\w)\\), we can apply a correction factor: +\\[ w\_I^\prime(s) = w\_I \cdot \frac{(\frac{\theta\_{\text{max}}}{2.363})^2 s^2 + 2\cdot 0.838 \cdot \frac{\theta\_{\text{max}}}{2.363} s + 1}{(\frac{\theta\_{\text{max}}}{2.363})^2 s^2 + 2\cdot 0.685 \cdot \frac{\theta\_{\text{max}}}{2.363} s + 1} \\] + +It is suggested to start with the simple weight and then if needed, to try the higher order weight. + + + +{{< figure src="/ox-hugo/skogestad07_lag_delay_uncertainty.png" caption="Figure 19: Multiplicative weight for gain and delay uncertainty" >}} + + +#### Unmodelled Dynamics Uncertainty {#unmodelled-dynamics-uncertainty} + +The most important reason for using frequency domain (\\(\hinf\\)) uncertainty description and complex perturbations, is the **incorporation of unmodelled dynamics**. +Unmodelled dynamics, while being close to neglected dynamics, also include unknown dynamics of unknown or even infinite order. + +
+
+ +To represent unmodelled dynamics, we usually use a simple **multiplicative weight** of the form + +\begin{equation} + w\_I(s) = \frac{\tau s + r\_0}{(\tau/r\_\infty) s + 1} +\end{equation} + +where \\(r\_0\\) is the relative uncertainty at steady-state, \\(1/\tau\\) is the frequency at which the relative uncertainty reaches \\(\SI{100}{\percent}\\), and \\(r\_\infty\\) is the magnitude of the weight at high frequency. + +
+ + +### SISO Robust Stability {#siso-robust-stability} + + +#### RS with Multiplicative Uncertainty {#rs-with-multiplicative-uncertainty} + +We want to determine the stability of the uncertain feedback system in Fig. [fig:feedback_multiplicative_uncertainty](#fig:feedback_multiplicative_uncertainty) where there is multiplicative uncertainty of magnitude \\(\abs{w\_I(j\w)}\\). +The loop transfer function becomes +\\[ L\_P = G\_p K = G K (1 + w\_I \Delta\_I) = L + w\_I L \Delta\_I \\] +We assume (by design) the stability of the nominal closed-loop system (with \\(\Delta\_I = 0\\)). +We use the Nyquist stability condition to test for robust stability of the closed loop system: + +\begin{align\*} + \text{RS} \quad &\stackrel{\text{def}}{\Longleftrightarrow} \quad \text{System stable} \ \forall L\_p \\\\\\ + &\Longleftrightarrow \quad L\_p \ \text{should not encircle -1}, \ \forall L\_p +\end{align\*} + + + +{{< figure src="/ox-hugo/skogestad07_input_uncertainty_set_feedback.png" caption="Figure 20: Feedback system with multiplicative uncertainty" >}} + + +##### Graphical derivation of RS-condition {#graphical-derivation-of-rs-condition} + +Consider the Nyquist plot of \\(L\_p\\) as shown in Fig. [fig:nyquist_uncertainty](#fig:nyquist_uncertainty). \\(\abs{1+L}\\) is the distance from the point \\(-1\\) to the center of the disc representing \\(L\_p\\) and \\(\abs{w\_I L}\\) is the radius of the disc. +Encirclements are avoided if none of the discs cover \\(-1\\), and we get: + +\begin{align\*} + \text{RS} \quad &\Leftrightarrow \quad \abs{w\_I L} < \abs{1 + L}, \ \forall\w \\\\\\ + &\Leftrightarrow \quad \abs{\frac{w\_I L}{1 + L}} < 1, \ \forall\w \\\\\\ + &\Leftrightarrow \quad \abs{w\_I T} < 1, \ \forall\w \\\\\\ +\end{align\*} + + + +{{< figure src="/ox-hugo/skogestad07_nyquist_uncertainty.png" caption="Figure 21: Nyquist plot of \\(L\_p\\) for robust stability" >}} + +
+
+ +The requirement of robust stability for the case with multiplicative uncertainty gives an **upper bound on the complementary sensitivity** + +\begin{equation} + \text{RS} \quad \Leftrightarrow \quad \abs{T} < 1/\abs{w\_I}, \ \forall\w +\end{equation} + +
+ +We see that we have to make \\(T\\) small at frequencies where the relative uncertainty \\(\abs{w\_I}\\) exceeds 1 in magnitude. + + +##### Algebraic derivation of RS-condition {#algebraic-derivation-of-rs-condition} + +Since \\(L\_p\\) is assumed stable, and the nominal closed-loop is stable, the nominal loop transfer function \\(L(j\w)\\) does not encircle -1. Therefore, since the set of plants is norm-bounded, it then follows that if some \\(L\_{p1}\\) in the uncertainty set encircles -1, then there must be another \\(L\_{p2}\\) in the uncertainty set which goes exactly through -1 at some frequency. Thus + +\begin{align\*} + \text{RS} \quad & \Leftrightarrow \abs{1 + L\_p} \ne 0,\ \forall L\_p,\,\forall \w\\\\\\ + & \Leftrightarrow \abs{1 + L\_p} > 0,\ \forall L\_p,\,\forall \w\\\\\\ + & \Leftrightarrow \abs{1 + L + w\_I L \Delta\_I} > 0,\ \forall \abs{\Delta\_I} \le 1,\,\forall \w\\\\\\ +\end{align\*} + +At each frequency, the last condition is most easily violated when the complex number \\(\Delta\_I(j\w)\\) is selected with \\(\abs{\Delta(j\w)} = 1\\) and with phase such that \\(1+L\\) and \\(w\_I L \Delta\_I\\) point in the opposite direction. Thus +\\[ \text{RS} \ \Leftrightarrow \ \abs{1 + L} - \abs{w\_I L} > 0, \ \forall\w \ \Leftrightarrow \ \abs{w\_I T} < 1, \ \forall\w \\] +And we obtain the same condition as before. + + +#### RS with Inverse Multiplicative Uncertainty {#rs-with-inverse-multiplicative-uncertainty} + +We will derive a corresponding RS-condition for feedback system with inverse multiplicative uncertainty (Fig. [fig:inverse_uncertainty_set](#fig:inverse_uncertainty_set)) in which +\\[ G\_p = G(1 + w\_{iI}(s) \Delta\_{iI})^{-1} \\] + + + +{{< figure src="/ox-hugo/skogestad07_inverse_uncertainty_set.png" caption="Figure 22: Feedback system with inverse multiplicative uncertainty" >}} + +We assume that \\(L\_p\\) and the nominal closed-loop systems are stable. 
Robust stability is guaranteed if \\(L\_p(j\w)\\) does not encircle the point -1:

\begin{align\*}
  \text{RS} \quad &\Leftrightarrow \quad \abs{1 + L\_p} > 0, \ \forall L\_p, \, \forall\w\\\\\\
  &\Leftrightarrow \quad \abs{1 + L (1 + w\_{iI} \Delta\_{iI})^{-1}} > 0, \ \forall \abs{\Delta\_{iI}} < 1, \, \forall\w\\\\\\
  &\Leftrightarrow \quad \abs{1 + w\_{iI} \Delta\_{iI} + L} > 0, \ \forall \abs{\Delta\_{iI}} < 1, \, \forall\w\\\\\\
  &\Leftrightarrow \quad \abs{1 + L} - \abs{w\_{iI} \Delta\_{iI}} > 0, \ \forall\w\\\\\\
  &\Leftrightarrow \quad \abs{w\_{iI} S} < 1, \ \forall\w\\\\\\
\end{align\*}

+
+ +The requirement for robust stability for the case with inverse multiplicative uncertainty gives an **upper bound on the sensitivity** + +\begin{equation} + \text{RS} \quad \Leftrightarrow \quad \abs{S} < 1/\abs{w\_{iI}}, \ \forall\w +\end{equation} + +We see that we need tight control and have to make \\(S\\) small at frequencies where the uncertainty is large and \\(w\_{iI}\\) exceeds 1 in magnitude. + +
+

The reason is that the uncertainty represents pole uncertainty, and at frequencies where \\(\abs{w\_{iI}}\\) exceeds 1, we allow for poles crossing from the left to the right-half plane, and we then need feedback (\\(\abs{S} < 1\\)) in order to stabilize the system.


### SISO Robust Performance {#siso-robust-performance}


#### SISO Nominal Performance {#siso-nominal-performance}

+
+ +The condition for nominal performance when considering performance in terms of the **weighted sensitivity** function is + +\begin{equation} + \begin{aligned} + \text{NP} &\Leftrightarrow \abs{w\_P S} < 1 \ \forall\omega \\\\\\ + &\Leftrightarrow \abs{w\_P} < \abs{1 + L} \ \forall\omega + \end{aligned} +\end{equation} + +
+ +Now \\(\abs{1 + L}\\) represents at each frequency the distance of \\(L(j\omega)\\) from the point \\(-1\\) in the Nyquist plot, so \\(L(j\omega)\\) must be at least a distance of \\(\abs{w\_P(j\omega)}\\) from \\(-1\\). +This is illustrated graphically in Fig. [fig:nyquist_performance_condition](#fig:nyquist_performance_condition). + + + +{{< figure src="/ox-hugo/skogestad07_nyquist_performance_condition.png" caption="Figure 23: Nyquist plot illustration of the nominal performance condition \\(\abs{w\_P} < \abs{1 + L}\\)" >}} + + +#### Robust Performance {#robust-performance} + +
+
+ +For robust performance, we require the performance condition to be satisfied for **all** possible plants: + +\begin{equation} + \begin{aligned} + \text{RP}\ &\overset{\text{def}}{\Leftrightarrow}\ \abs{w\_P S} < 1 \quad \forall S\_p, \forall \omega\\\\\\ + \ &\Leftrightarrow\ \abs{w\_P} < \abs{1 + L\_p} \quad \forall L\_p, \forall \omega + \end{aligned} +\end{equation} + +
+ +Let's consider the case of multiplicative uncertainty as shown on Fig. [fig:input_uncertainty_set_feedback_weight_bis](#fig:input_uncertainty_set_feedback_weight_bis). +The robust performance corresponds to requiring \\(\abs{\hat{y}/d}<1\ \forall \Delta\_I\\) and the set of possible loop transfer functions is +\\[ L\_p = G\_p K = L (1 + w\_I \Delta\_I) = L + w\_I L \Delta\_I \\] + + + +{{< figure src="/ox-hugo/skogestad07_input_uncertainty_set_feedback_weight_bis.png" caption="Figure 24: Diagram for robust performance with multiplicative uncertainty" >}} + + +##### Graphical derivation of RP-condition {#graphical-derivation-of-rp-condition} + +As illustrated on Fig. [fig:nyquist_performance_condition](#fig:nyquist_performance_condition), we must required that all possible \\(L\_p(j\omega)\\) stay outside a disk of radius \\(\abs{w\_P(j\omega)}\\) centered on \\(-1\\). +Since \\(L\_p\\) at each frequency stays within a disk of radius \\(|w\_I(j\omega) L(j\omega)|\\) centered on \\(L(j\omega)\\), the condition for RP becomes: + +\begin{align\*} + \text{RP}\ &\Leftrightarrow\ \abs{w\_P} + \abs{w\_I L} < \abs{1+L} \quad \forall\omega\\\\\\ + &\Leftrightarrow\ \abs{w\_P(1 + L)^{-1}} + \abs{w\_I L(1 + L)^{-1}} < 1 \quad \forall\omega\\\\\\ +\end{align\*} + +
+
+ +Finally, we obtain the following condition for Robust Performance: + +\begin{equation} + \text{RP} \ \Leftrightarrow\ \max\_{\omega} \left(\abs{w\_P S} + \abs{w\_I T} \right) < 1 +\end{equation} + +
+ + +##### Algebraic derivation of RP-condition {#algebraic-derivation-of-rp-condition} + +RP is satisfied if the worst-case weighted sensitivity at each frequency is less than \\(1\\): +\\[ \text{RP} \ \Leftrightarrow\ \max\_{S\_p} \abs{w\_P S\_p} < 1, \quad \forall\omega \\] + +The perturbed sensitivity \\(S\_p\\) is +\\[ S\_p = \frac{1}{1 + L\_p} = \frac{1}{1 + L + w\_I L \Delta\_I} \\] +Thus: +\\[ \max\_{S\_p} \abs{w\_P S\_p} = \frac{\abs{w\_P}}{\abs{1 + L} - \abs{w\_I L}} = \frac{\abs{w\_P S}}{1 - \abs{w\_I T}} \\] +And we obtain the same RP-condition as the graphically derived one. + + +##### Remarks on RP-condition {#remarks-on-rp-condition} + +1. The RP-condition for this problem is closely approximated by the mixed sensitivity \\(\hinf\\) condition: + \\[ \tcmbox{\hnorm{\begin{matrix}w\_P S \\ w\_I T\end{matrix}} = \max\_{\omega} \sqrt{\abs{w\_P S}^2 + \abs{w\_I T}^2} <1} \\] + This condition is within a factor at most \\(\sqrt{2}\\) of the true RP-condition. + This means that **for SISO systems, we can closely approximate the RP-condition in terms of an \\(\hinf\\) problem**, so there is no need to make use of the structured singular value. + However, we will see that the situation can be very different for MIMO systems. +2. The RP-condition can be used to derive bounds on the loop shape \\(\abs{L}\\): + + \begin{align\*} + \abs{L} &> \frac{1 + \abs{w\_P}}{1 - \abs{w\_I}}, \text{ at frequencies where } \abs{w\_I} < 1\\\\\\ + \abs{L} &< \frac{1 - \abs{w\_P}}{1 + \abs{w\_I}}, \text{ at frequencies where } \abs{w\_P} < 1\\\\\\ + \end{align\*} + + +#### The Relationship Between NP, RS and RP {#the-relationship-between-np-rs-and-rp} + +Consider a SISO system with multiplicative input uncertainty, and assume that the closed-loop is nominally stable (NS). +The conditions for nominal performance (NP), robust stability (RS) and robust performance (RP) as summarized as follows: + +
+
+ +\begin{subequations} + \begin{align} + \text{NP} & \Leftrightarrow |w\_P S| < 1,\ \forall \omega \\\\\\ + \text{RS} & \Leftrightarrow |w\_I T| < 1,\ \forall \omega \\\\\\ + \text{RP} & \Leftrightarrow |w\_P S| + |w\_I T| < 1,\ \forall \omega + \end{align} +\end{subequations} + +
+ +From this we see that **a prerequisite for RP is that we satisfy both NP and RS**. +This applies in general, both for SISO and MIMO systems and for any uncertainty.
+

In addition, for SISO systems, if we satisfy both RS and NP, then we have at each frequency:
\\[ |w\_P S| + |w\_I T| < 2 \cdot \max \\{|w\_P S|, |w\_I T|\\} < 2 \\]
It then follows that, within a factor at most 2, we will automatically get RP when NP and RS are satisfied.
Thus, RP is not a "big issue" for SISO systems.
+ +To satisfy RS we generally want \\(T\\) small, whereas to satisfy \\(NP\\) we generally want \\(S\\) small. +However, we cannot make both \\(S\\) and \\(T\\) small at the same frequency because of the identity \\(S + T = 1\\). +This has implications for RP: + +\begin{align\*} + |w\_P S| + |w\_I T| &\ge \text{min}\\{|w\_P|, |w\_I|\\}(|S| + |T|) \\\\\\ + &\ge \text{min}\\{|w\_P|, |w\_I|\\}(|S + T|) \\\\\\ + &\ge \text{min}\\{|w\_P|, |w\_I|\\} +\end{align\*} + +This means that we cannot have both \\(|w\_P|>1\\) (i.e. good performance) and \\(|w\_I|>1\\) (i.e. more than \\(\si{100}{\%}\\) uncertainty) at the same frequency. + + +### Examples of Parametric Uncertainty {#examples-of-parametric-uncertainty} + + +#### Parametric Pole Uncertainty {#parametric-pole-uncertainty} + +Consider the following set of plants: +\\[ G\_p(s) = \frac{1}{s - a\_p} G\_0(s); \quad a\_\text{min} \le a\_p \le a\_{\text{max}} \\] + +If \\(a\_\text{min}\\) and \\(a\_\text{max}\\) have different signs, then this means that the plant can change from stable to unstable with the pole crossing through the origin. + +This set of plants can be written as +\\[ G\_p(s) = \frac{G\_0(s)}{s - \bar{a}(1 + r\_a \Delta)}; \quad -1 \le \Delta \le 1 \\] +which can be exactly described by inverse multiplicative uncertainty: +\\[ G(s) = \frac{G\_0(s)}{(s - \bar{a})}; \quad w\_{iI}(s) = \frac{r\_a \bar{a}}{s - \bar{a}} \\] + +The magnitude of \\(w\_{iI}(s)\\) is equal to \\(r\_a\\) at low frequency and goes to \\(0\\) at high frequencies. 
+ + +##### Time constant form {#time-constant-form} + +It is also interesting to consider another form of pole uncertainty, namely that associated with the time constant: +\\[ G\_p(s) = \frac{1}{\tau\_p s + 1} G\_0(s); \quad \tau\_\text{min} \le \tau\_p \le \tau\_\text{max} \\] + +The corresponding uncertainty weight is +\\[ w\_{iI}(s) = \frac{r\_\tau \bar{\tau} s}{1 + \bar{\tau} s} \\] + +This results in uncertainty in the pole location, but here the uncertainty affects the model at high frequency. + + +#### Parametric Zero Uncertainty {#parametric-zero-uncertainty} + +Consider zero uncertainty in the "time constant" form as in: +\\[ G\_p(s) = (1 + \tau\_p s)G\_0(s); \quad \tau\_\text{min} \le \tau\_p \le \tau\_\text{max} \\] + +This set of plants may be written as multiplicative uncertainty with: +\\[ w\_I(s) = \frac{r\_\tau \bar{\tau} s}{1 + \bar{\tau} s} \\] +The magnitude \\(|w\_I(j\omega)|\\) is small at low frequencies and approaches \\(r\_\tau\\) at high frequencies. +For cases with \\(r\_\tau > 1\\) we allow the zero to cross from the LHP to the RHP. + + +#### Parametric State-Space Uncertainty {#parametric-state-space-uncertainty} + +A general procedure for handling parametric uncertainty which is more suited for numerical calculations, is parametric state-space uncertainty. +Consider an uncertain state-space model: + +\begin{align\*} + \dot{x} &= A\_p x + B\_p u \\\\\\ + y &= C\_p x + D\_p u +\end{align\*} + +Assume that the underlying cause for the uncertainty is uncertainty in some real parameters \\(\delta\_1, \delta\_2, \dots\\) and assume that the state space matrices depends linearly on these parameters: + +\begin{align\*} + A\_p = A + \sum \delta\_i A\_i; \quad & B\_p = B + \sum \delta\_i B\_i \\\\\\ + C\_p = C + \sum \delta\_i C\_i; \quad & D\_p = D + \sum \delta\_i D\_i +\end{align\*} + +where \\(A\\), \\(B\\), \\(C\\) and \\(D\\) model the nominal system. 
+ +We can collect the perturbations \\(\delta\_i\\) in a large diagonal matrix \\(\Delta\\) with the real \\(\delta\_i\\)'s along its diagonal: +\\[ A\_p = A + \sum \delta\_i A\_i = A + W\_2 \Delta W\_1 \\] + +In the transfer function form: + +\begin{align\*} + (s I - A\_p)^{-1} &= (sI - A - W\_2 \Delta W\_1)^{-1} \\\\\\ + &= (I - \Phi(s) W\_2 \Delta W\_1)^{-1} \Phi(s) +\end{align\*} + +with \\(\Phi(s) \triangleq (sI - A)^{-1}\\). + +This is illustrated in the block diagram of Fig. [fig:uncertainty_state_a_matrix](#fig:uncertainty_state_a_matrix), which is in the form of an inverse additive perturbation. + + + +{{< figure src="/ox-hugo/skogestad07_uncertainty_state_a_matrix.png" caption="Figure 25: Uncertainty in state space A-matrix" >}} + + +### Conclusion {#conclusion} + +Model uncertainty for SISO systems can be represented in the frequency domain using complex norm-bounded perturbations \\(\hnorm{\Delta} \le 1\\).
+ +Requirements of robust stability for the case of multiplicative complex uncertainty imposes an upper bound on the allowed complementary sensitivity, \\(\abs{w\_I T} < 1, \ \forall\w\\).
+ +Similarly, the inverse multiplicative uncertainty imposes an upper bound on the sensitivity, \\(\abs{w\_{iI} S} < 1, \ \forall\w\\).
+ +We also derived a condition for robust performance with multiplicative uncertainty, \\(\abs{w\_P S} + \abs{w\_I T} < 1, \ \forall\w\\). + + +## Robust Stability and Performance Analysis {#robust-stability-and-performance-analysis} + + + + +### General Control Configuration with Uncertainty {#general-control-configuration-with-uncertainty} + +The starting point for our robustness analysis is a system representation in which the uncertain perturbations are "pulled out" into a **block diagonal matrix** +\\[ \Delta = \text{diag} \\{\Delta\_i\\} = \begin{bmatrix}\Delta\_1 \\ & \ddots \\ & & \Delta\_i \\ & & & \ddots \end{bmatrix} \\] +where each \\(\Delta\_i\\) represents a **specific source of uncertainty**, e.g. input uncertainty \\(\Delta\_I\\) or parametric uncertainty \\(\delta\_i\\). + +If we also pull out the controller \\(K\\), we get the generalized plant \\(P\\) as shown in Fig. [fig:general_control_delta](#fig:general_control_delta). This form is useful for controller synthesis. + + + +{{< figure src="/ox-hugo/skogestad07_general_control_delta.png" caption="Figure 26: General control configuration used for controller synthesis" >}} + +If the controller is given and we want to analyze the uncertain system, we use the \\(N\Delta\text{-structure}\\) in Fig. [fig:general_control_Ndelta](#fig:general_control_Ndelta). 
+ + + +{{< figure src="/ox-hugo/skogestad07_general_control_Ndelta.png" caption="Figure 27: \\(N\Delta\text{-structure}\\) for robust performance analysis" >}} + +\\(N\\) is related to \\(P\\) and \\(K\\) by a **lower LFT** + +\begin{align\*} + N &= F\_l(P, K) \\\\\\ + &\triangleq P\_{11} + P\_{12} K (I - P\_{22}K)^{-1} P\_{21} +\end{align\*} + +Similarly, the uncertain closed-loop transfer function from \\(w\\) to \\(z\\), is related to \\(N\\) and \\(\Delta\\) by an **upper LFT** + +\begin{align\*} + F &= F\_u(N, \Delta) \\\\\\ + &\triangleq N\_{22} + N\_{21} \Delta (I - N\_{11} \Delta)^{-1} N\_{12} +\end{align\*} + +To analyze robust stability of \\(F\\), we can rearrange the system into the \\(M\Delta\text{-structure}\\) shown in Fig. [fig:general_control_Mdelta_bis](#fig:general_control_Mdelta_bis) where \\(M = N\_{11}\\) is the transfer function from the output to the input of the perturbations. + + + +{{< figure src="/ox-hugo/skogestad07_general_control_Mdelta_bis.png" caption="Figure 28: \\(M\Delta\text{-structure}\\) for robust stability analysis" >}} + + +### Representing Uncertainty {#representing-uncertainty} + +Each individual perturbation is assumed to be **stable and normalized**: +\\[ \maxsv(\Delta\_i(j\w)) \le 1 \quad \forall\w \\] + +As the maximum singular value of a block diagonal matrix is equal to the largest of the maximum singular values of the individual blocks, it then follows for \\(\Delta = \text{diag}\\{\Delta\_i\\}\\) that +\\[ \maxsv(\Delta\_i(j\w)) \le 1 \quad \forall\w, \forall i \quad \Leftrightarrow \quad \tcmbox{\hnorm{\Delta} \le 1} \\] + + +#### Differences Between SISO and MIMO Systems {#differences-between-siso-and-mimo-systems} + +The main difference between SISO and MIMO systems is the concept of directions which is only relevant in the latter. +As a consequence, MIMO systems may experience **much larger sensitivity to uncertainty** than SISO systems. 
+ + +#### Parametric Uncertainty {#parametric-uncertainty} + +The representation of parametric uncertainty for MIMO systems is the same as for SISO systems. +However, the inclusion of parametric uncertainty may be more significant for MIMO plants because it offers a simple method of representing uncertain transfer function elements. + + +#### Unstructured Uncertainty {#unstructured-uncertainty} + +Unstructured perturbations are often used to get a simple uncertainty model. +We here define unstructured uncertainty as the use of a "full" complex perturbation matrix \\(\Delta\\), usually with dimensions compatible with those of the plant, where at each frequency any \\(\Delta(j\w)\\) satisfying \\(\maxsv(\Delta(j\w)) < 1\\) is allowed. + +Three common forms of **feedforward unstructured uncertainty** are shown Fig. [fig:feedforward_uncertainty](#fig:feedforward_uncertainty): additive uncertainty, multiplicative input uncertainty and multiplicative output uncertainty. + +
+
+ +\begin{alignat\*}{3} + &\Pi\_A: \quad &&G\_p = G + E\_A; \quad& &E\_a = w\_A \Delta\_a \\\\\\ + &\Pi\_I: \quad &&G\_p = G(I + E\_I); \quad& &E\_I = w\_I \Delta\_I \\\\\\ + &\Pi\_O: \quad &&G\_p = (I + E\_O)G; \quad& &E\_O = w\_O \Delta\_O +\end{alignat\*} + +
+ + +
+ Table 4: + Common feedforward unstructured uncertainty +
+ +| ![](/ox-hugo/skogestad07_additive_uncertainty.png) | ![](/ox-hugo/skogestad07_input_uncertainty.png) | ![](/ox-hugo/skogestad07_output_uncertainty.png) | +|----------------------------------------------------|----------------------------------------------------------|-----------------------------------------------------------| +| Additive uncertainty | Multiplicative input uncertainty | Multiplicative output uncertainty | + +In Fig. [fig:feedback_uncertainty](#fig:feedback_uncertainty), three **feedback or inverse unstructured uncertainty** forms are shown: inverse additive uncertainty, inverse multiplicative input uncertainty and inverse multiplicative output uncertainty. + +
+
+ +\begin{alignat\*}{3} + &\Pi\_{iA}: \quad &&G\_p = G(I - E\_{iA} G)^{-1}; & & \quad E\_{ia} = w\_{iA} \Delta\_{ia} \\\\\\ + &\Pi\_{iI}: \quad &&G\_p = G(I - E\_{iI})^{-1}; & & \quad E\_{iI} = w\_{iI} \Delta\_{iI} \\\\\\ + &\Pi\_{iO}: \quad &&G\_p = (I - E\_{iO})^{-1}G; & & \quad E\_{iO} = w\_{iO} \Delta\_{iO} +\end{alignat\*} + +
+ + +
+ Table 5: + Common feedback unstructured uncertainty +
+ +| ![](/ox-hugo/skogestad07_inv_additive_uncertainty.png) | ![](/ox-hugo/skogestad07_inv_input_uncertainty.png) | ![](/ox-hugo/skogestad07_inv_output_uncertainty.png) | +|--------------------------------------------------------|------------------------------------------------------------------|-------------------------------------------------------------------| +| Inverse additive uncertainty | Inverse multiplicative input uncertainty | Inverse multiplicative output uncertainty | + + +##### Lumping uncertainty into a single perturbation {#lumping-uncertainty-into-a-single-perturbation} + +For SISO systems, we usually lump multiple sources of uncertainty into a single complex perturbation; often in the multiplicative form. +This may be also done for MIMO systems, but then it makes a difference whether the perturbation is at the input or the output.
+ +Since **output uncertainty is frequently less restrictive than input uncertainty in terms of control performance**, we first attempt to lump the uncertainty at the output. For example, a set of plants \\(\Pi\\) may be represented by multiplicative output uncertainty with a scalar weight \\(w\_O(s)\\) using +\\[ G\_p = (I + w\_O \Delta\_O) G, \quad \hnorm{\Delta\_O} \le 1 \\] +where +\\[ l\_O(\w) = \max\_{G\_p \in \Pi} \maxsv\left( (G\_p - G)G^{-1} \right); \ \abs{w\_O(j\w)} \ge l\_O(\w), \, \forall\w \\] + +If the resulting uncertainty weight is reasonable and the analysis shows that robust stability and performance may be achieved, then this lumping of uncertainty at the output is fine. +If this is not the case, then one may try to lump the uncertainty at the input instead, using multiplicative input uncertainty with a scalar weight, +\\[ G\_p = G(I + w\_I \Delta\_I), \quad \hnorm{\Delta\_I} \le 1 \\] +where +\\[ l\_I(\w) = \max\_{G\_p \in \Pi} \maxsv\left( G^{-1}(G\_p - G) \right); \ \abs{w\_I(j\w)} \ge l\_I(\w), \, \forall\w \\] + +However, in many cases, this approach of lumping uncertainty either at the output or the input does **not** work well because **it usually introduces additional plants** that were not present in the original set. + + +##### Conclusion {#conclusion} + +Ideally, we would like to lump several sources of uncertainty into a single perturbation to get a simple uncertainty description. +Often an unstructured multiplicative output perturbation is used. +However, we should be careful about doing this, at least for plants with a large condition number. +In such cases we may have to represent the uncertainty as it occurs physically (at the input, in the elements, etc.) thereby generating several perturbations. 
+ + +#### Diagonal Uncertainty {#diagonal-uncertainty} + +By "diagonal uncertainty" we mean that the perturbation is a complex diagonal matrix +\\[ \Delta(s) = \text{diag}\\{\delta\_i(s)\\}; \quad \abs{\delta\_i(j\w)} \le 1, \ \forall\w, \, \forall i \\] + +Diagonal uncertainty usually arises from a consideration of uncertainty or neglected dynamics in the **individual input or output channels**. +This type of diagonal uncertainty is **always present**. + +
+
+ +Let us consider uncertainty in the input channels. With each input \\(u\_i\\), there is a physical system (amplifier, actuator, etc.) which based on the controller output signal \\(u\_i\\), generates a physical plant input \\(m\_i\\) +\\[ m\_i = h\_i(s) u\_i \\] +The scalar transfer function \\(h\_i(s)\\) is often absorbed into the plant model \\(G(s)\\). +We can represent its uncertainty as multiplicative uncertainty +\\[ h\_{pi}(s) = h\_i(s)(1 + w\_{Ii}(s)\delta\_i(s)); \quad \abs{\delta\_i(j\w)} \le 1, \, \forall\w \\] +which after combining all input channels results in diagonal input uncertainty for the plant + +\begin{align\*} + G\_p(s) = G(I + W\_I \Delta\_I) \text{ with } &\Delta\_I = \diag{\delta\_i} \\\\\\ + &W\_I = \diag{w\_{Ii}} +\end{align\*} + +
+ +Normally, we would represent the uncertainty in each input or output channel using a simple weight in the form +\\[ w(s) = \frac{\tau s + r\_0}{(\tau/r\_\infty)s + 1} \\] +where \\(r\_0\\) is the relative uncertainty at steady-state, \\(1/\tau\\) is the frequency where the relative uncertainty reaches \\(\SI{100}{\percent}\\), and \\(r\_\infty\\) is the magnitude of the weight at high frequencies.
+ +**Diagonal input uncertainty should always be considered because**: + +- it is **always** present and a system which is sensitive to this uncertainty will not work in practice +- it often **restricts achievable performance** with multivariable control + + +### Obtaining \\(P\\), \\(N\\) and \\(M\\) {#obtaining--p----n--and--m} + +Let's consider the feedback system with multiplicative input uncertainty \\(\Delta\_I\\) shown in Fig. [fig:input_uncertainty_set_feedback_weight](#fig:input_uncertainty_set_feedback_weight). +\\(W\_I\\) is a normalization weight for the uncertainty and \\(W\_P\\) is a performance weight. + +<a id="fig:input_uncertainty_set_feedback_weight"></a> + +{{< figure src="/ox-hugo/skogestad07_input_uncertainty_set_feedback_weight.png" caption="Figure 29: System with multiplicative input uncertainty and performance measured at the output" >}} + +We want to derive the generalized plant \\(P\\) which has inputs \\([u\_\Delta,\ w,\ u]^T\\) and outputs \\([y\_\Delta,\ z,\ v]^T\\). + +By breaking the loop before and after \\(K\\) and \\(\Delta\_I\\), we get +\\[ P = \begin{bmatrix} + 0 & 0 & W\_I \\\\\\ + W\_P G & W\_P & W\_P G \\\\\\ + -G & -I & -G +\end{bmatrix} \\] + +Next, we want to derive the matrix \\(N\\). We first partition \\(P\\) to be compatible with \\(K\\): + +\begin{align\*} + P\_{11} = \begin{bmatrix}0&0\\W\_P G&W\_P\end{bmatrix},\quad & P\_{12} = \begin{bmatrix}W\_I\\W\_P G\end{bmatrix} \\\\\\ + P\_{21} = \begin{bmatrix}-G&-I\end{bmatrix}, \quad & P\_{22} = -G \\\\\\ +\end{align\*} + +and then we find \\(N\\) using \\(N = F\_l(P, K)\\). + + +### Definitions of Robust Stability and Robust Performance {#definitions-of-robust-stability-and-robust-performance} + +The next step is to check whether we have stability and acceptable performance for all plants in the set: + +1. **Robust stability analysis**: with a given controller \\(K\\) we determine whether the system remains stable for all plants in the uncertainty set +2. 
**Robust performance analysis**: is RS is satisfied, we determine how "large" the transfer function from exogenous inputs \\(w\\) to outputs \\(z\\) may be for all plants in the uncertainty set + +We have \\(z = F(\Delta) \cdot w\\) with + +\begin{align\*} + F &= F\_u(N, \Delta) \\\\\\ + &\triangleq N\_{22} + N\_{21}\Delta(I - N\_{11}\Delta)^{-1} N\_{12} +\end{align\*} + +We here use \\(\hinf\\) norm to define performance and require for RP that \\(\hnorm{F(\Delta)} \le 1\\) for all allowed \\(\Delta\\). +A typical choice is \\(F = w\_P S\_P\\) where \\(w\_P\\) is the performance weight and \\(S\_P\\) represents the set of perturbed sensitivity functions. + +
+
+ +In terms of the \\(N\Delta\text{-structure}\\), our requirements for stability and performance can be summarized as follows: + +\begin{align\*} +\text{NS} &\ \stackrel{\text{def}}{\Longleftrightarrow} \ N \text{ is internally stable} \\\\\\ +\text{NP} &\ \stackrel{\text{def}}{\Longleftrightarrow} \ \text{NS and } \hnorm{N\_{22}} < 1 \\\\\\ +\text{RS} &\ \stackrel{\text{def}}{\Longleftrightarrow} \ \text{NS and } F = F\_u(N, \Delta) \text{ is stable } \forall\Delta \\\\\\ +\text{RP} &\ \stackrel{\text{def}}{\Longleftrightarrow} \ \text{NS and } \hnorm{F} < 1, \quad \forall \Delta, \,\hnorm{\Delta} \le 1 \\\\\\ +\end{align\*} + +
+ + +### Robust Stability for the \\(M\Delta\text{-structure}\\) {#robust-stability-for-the--m-delta-text-structure} + +Consider the uncertain \\(N\Delta\text{-system}\\) for which the transfer function from \\(w\\) to \\(z\\) is +\\[ F\_u(N, \Delta) = N\_{22} + N\_{21}\Delta(I - N\_{11}\Delta)^{-1} N\_{12} \\] +Suppose that the system is nominally stable (with \\(\Delta = 0\\)) that is \\(N\\) is stable. We also assume that \\(\Delta\\) is stable. +We then see from the above equation that the **only possible source of instability** is the feedback term \\((I - N\_{11}\Delta)^{-1}\\). +Thus, when we have nominal stability, the stability of the \\(N\Delta\text{-structure}\\) is equivalent to the stability of the \\(M\Delta\text{-structure}\\) where \\(M = N\_{11}\\). + +We thus need to derive conditions for checking the stability of the \\(M\Delta\text{-structure}\\). + +
+
+ +Assume that the nominal system \\(M(s)\\) and the perturbations \\(\Delta(s)\\) are stable. +Consider the convex set of perturbations \\(\Delta\\), such that if \\(\Delta^\prime\\) is an allowed perturbation then so is \\(c\Delta^\prime\\) where c is any **real** scalar such that \\(\abs{c} \le 1\\). +Then the \\(M\Delta\text{-structure}\\) is stable for all allowed perturbations **if and only if** the Nyquist plot of \\(\det\left( I - M\Delta(s) \right)\\) does not encircle the origin, \\(\forall\Delta\\): + +\begin{equation} + \det( I - M\Delta(j\w)) \ne 0, \quad \forall\w, \, \forall\Delta +\end{equation} + +
+ +
+
+ +Assume that the nominal system \\(M(s)\\) and the perturbations \\(\Delta(s)\\) are stable. +Consider the class of perturbations, \\(\Delta\\), such that if \\(\Delta^\prime\\) is an allowed perturbation, then so is \\(c\Delta^\prime\\) where c is any **complex** scalar such that \\(\abs{c} \le 1\\). +Then the \\(M\Delta\text{-structure}\\) is stable for all allowed perturbations **if and only if**: + +\begin{equation} +\begin{aligned} + &\rho(M\Delta(j\w)) < 1, \quad \forall\w, \, \forall\Delta\\\\\\ + \Leftrightarrow \quad &\max\_{\Delta} \rho(M\Delta(j\w)) < 1, \quad \forall\w +\end{aligned} +\end{equation} + +
+ + +### RS for Complex Unstructured Uncertainty {#rs-for-complex-unstructured-uncertainty} + +Let \\(\Delta\\) be the set of all complex matrices such that \\(\maxsv(\Delta) \le 1\\) (\\(\\|\Delta\\|\_\infty \le 1\\)). +This is often referred to as **unstructured uncertainty** or as full-block complex perturbation uncertainty. +Then we have + +\begin{align\*} + \max\_\Delta \rho(M\Delta) &= \max\_\Delta \maxsv(M\Delta) \\\\\\ + &= \max\_\Delta \maxsv(\Delta) \maxsv(M) \\\\\\ + &= \maxsv(M) +\end{align\*} + +
+
+ +Assume that the nominal system \\(M(s)\\) is stable and that the perturbations \\(\Delta(s)\\) are stable. +Then the \\(M\Delta\text{-system}\\) is stable for all perturbations \\(\Delta\\) satisfying \\(\hnorm{\Delta} \le 1\\) if and only if + +\begin{equation} + \maxsv(M(j\w)) < 1 \ \forall\w \quad \Leftrightarrow \quad \hnorm{M} < 1 +\end{equation} + +
+ + +#### Application of the Unstructured RS-condition {#application-of-the-unstructured-rs-condition} + +We will now present necessary and sufficient conditions for robust stability for each of the six single unstructured perturbations in Figs [fig:feedforward_uncertainty](#fig:feedforward_uncertainty) and [fig:feedback_uncertainty](#fig:feedback_uncertainty) with +\\[ E = W\_2 \Delta W\_1, \quad \hnorm{\Delta} \le 1 \\] + +To derive the matrix \\(M\\) we simply "isolate" the perturbation, and determine the transfer function matrix +\\[ M = W\_1 M\_0 W\_2 \\] +from the output to the input of the perturbation, where \\(M\_0\\) for each of the six cases is given by + +\begin{alignat\*}{2} + G\_p &= G + E\_A: \quad && M\_0 = K (I + GK)^{-1} = KS\\\\\\ + G\_p &= G(I + E\_I): \quad && M\_0 = K (I + GK)^{-1}G = T\_I\\\\\\ + G\_p &= (I + E\_O)G: \quad && M\_0 = G K (I + GK)^{-1} = T\\\\\\ + G\_p &= G(I - E\_{iA}G)^{-1}: \quad && M\_0 = (I + GK)^{-1} G = SG\\\\\\ + G\_p &= G(I - E\_{iI})^{-1}: \quad && M\_0 = (I + KG)^{-1} = S\_I\\\\\\ + G\_p &= (I - E\_{iO})^{-1} G: \quad && M\_0 = (I + GK)^{-1} = S +\end{alignat\*} + +Using the theorem to check RS for unstructured perturbations +\\[ \text{RS} \quad \Leftrightarrow \quad \hnorm{W\_1 M\_0 W\_2(j\w)} < 1, \ \forall\w \\] + +For instance, for feedforward input uncertainty, we get +\\[ \text{RS}\ \forall G\_p = G(I + w\_I \Delta\_I), \hnorm{\Delta\_I} \le 1 \Leftrightarrow \hnorm{w\_I T\_I} < 1 \\] + +In general, **the unstructured uncertainty descriptions in terms of a single perturbation are not "tight"** (in the sense that at each frequency all complex perturbations satisfying \\(\maxsv(\Delta(j\w)) \le 1\\) may not be possible in practice). +Thus, the above RS-conditions are often **conservative**. +In order to get a tighter condition we must use a tighter uncertainty description in terms of a block-diagonal \\(\Delta\\). 
+ + +#### RS for Coprime Factor Uncertainty {#rs-for-coprime-factor-uncertainty} + +Robust stability bounds in terms of the \\(\hinf\\) norm (\\(\text{RS}\Leftrightarrow\hnorm{M}<1\\)) are in general only tight when there is a single full perturbation block. +An "exception" to this is when the uncertainty blocks enter or exit from the same location in the block diagram, because they can then be stacked on top of each other or side-by-side, in an overall \\(\Delta\\) which is then a full matrix. + +One important uncertainty description that falls into this category is the **coprime uncertainty description** shown in Fig. [fig:coprime_uncertainty](#fig:coprime_uncertainty), for which the set of plants is +\\[ G\_p = (M\_l + \Delta\_M)^{-1}(N\_l + \Delta\_N), \quad \hnorm{[\Delta\_N, \ \Delta\_M]} \le \epsilon \\] +where \\(G = M\_l^{-1} N\_l\\) is a left coprime factorization of the nominal plant. + +This uncertainty description is surprisingly **general**, it allows both zeros and poles to cross into the right-half plane, and has proven to be very useful in applications. + +<a id="fig:coprime_uncertainty"></a> + +{{< figure src="/ox-hugo/skogestad07_coprime_uncertainty.png" caption="Figure 30: Coprime Uncertainty" >}} + +Since we have no weights on the perturbations, it is reasonable to use a normalized coprime factorization of the nominal plant. +In any case, to test for RS we can rearrange the block diagram to match the \\(M\Delta\text{-structure}\\) with +\\[ \Delta = [\Delta\_N, \ \Delta\_M]; \quad M = -\begin{bmatrix}K\\I\end{bmatrix} (I + GK)^{-1} M\_l^{-1} \\] +And we get +\\[ \text{RS}\ \forall\ \hnorm{\Delta\_N, \ \Delta\_M} \le \epsilon \quad \Leftrightarrow \quad \hnorm{M} < 1/\epsilon \\] + +The coprime uncertainty description provides a good **generic uncertainty description** for cases where we do not use any specific a priori uncertainty information. +Note that the uncertainty magnitude is \\(\epsilon\\), so it is not normalized to be less than 1 in this case. 
+This is because this uncertainty description is most often used in a controller design procedure where the objective is to maximize the magnitude of the uncertainty \\(\epsilon\\) such that RS is maintained. + + +### RS with Structured Uncertainty: Motivation {#rs-with-structured-uncertainty-motivation} + +Consider now the presence of structured uncertainty, where \\(\Delta = \text{diag}\\{\Delta\_i\\}\\) is block-diagonal. +To test for robust stability, we rearrange the system into the \\(M\Delta\text{-structure}\\) and we have +\\[ \text{RS if } \maxsv(M(j\w)) < 1, \ \forall\w \\] + +We have here written "if" rather than "if and only if" since this condition is only sufficient for RS when \\(\Delta\\) has "no structure". +The question is whether we can take advantage of the fact that \\(\Delta = \text{diag}\\{\Delta\_i\\}\\) is structured to obtain an RS-condition which is tighter. +On idea is to make use of the fact that stability must be independent of scaling. + +To this effect, introduce the block-diagonal scaling matrix +\\[ D = \diag{d\_i I\_i} \\] +where \\(d\_i\\) is a scalar and \\(I\_i\\) is an identity matrix of the same dimension as the \\(i\\)'th perturbation block \\(\Delta\_i\\). + +Now rescale the inputs and outputs of \\(M\\) and \\(\Delta\\) by inserting the matrices \\(D\\) and \\(D^{-1}\\) on both sides as shown in Fig. [fig:block_diagonal_scalings](#fig:block_diagonal_scalings). +This clearly has no effect on stability. + + + +{{< figure src="/ox-hugo/skogestad07_block_diagonal_scalings.png" caption="Figure 31: Use of block-diagonal scalings, \\(\Delta D = D \Delta\\)" >}} + +Note that with the chosen form for the scalings we have for each perturbation block \\(\Delta\_i = d\_i \Delta\_i d\_i^{-1}\\), that is we have \\(\Delta = D \Delta D^{-1}\\). + +This means that we have +\\[ \text{RS if } \maxsv(DM(j\w)D^{-1}) < 1, \ \forall\w \\] + +
+
+ +This applies for any \\(D\\), and therefore the "most improved" (least conservative) RS-condition is obtained by minimizing at each frequency the scaled singular value and we have +\\[ \text{RS if } \min\_{D(\w) \in \mathcal{D}} \maxsv(D(\w)M(j\w)D(\w)^{-1}) < 1, \ \forall\w \\] +where \\(\mathcal{D}\\) is the set of block-diagonal matrices whose structure is compatible to that of \\(\Delta\\), i.e, \\(\Delta D = D \Delta\\). + +
+ +When \\(\Delta\\) is a full matrix, we must select \\(D = dI\\) and we have \\(\maxsv(D M D^{-1}) = \maxsv(M)\\), and we cannot improve the RS-condition. +However, when \\(\Delta\\) has structure, we get more degrees of freedom in \\(D\\) and \\(\maxsv(D M D^{-1})\\) may be significantly smaller than \\(\maxsv(M)\\). + + +### The Structured Singular Value {#the-structured-singular-value} + + +#### Definition {#definition} + +The structured singular value \\(\mu\\) is a function which provides a **generalization of the singular value** \\(\maxsv\\) and the **spectral radius** \\(\rho\\). +We will use \\(\mu\\) to get necessary and sufficient conditions for robust stability and also for robust performance. + +\\(\mu\\) can be explained as follow: + +> Find the smallest structured \\(\Delta\\) (measured in terms of \\(\maxsv(\Delta)\\)) which makes the matrix \\(I - M \Delta\\) singular; then \\(\mu(M) = 1/\maxsv(\Delta)\\). + +Mathematically +\\[ \mu(M)^{-1} \triangleq \min\_{\Delta}\\{\maxsv(\Delta) | \det(I-M\Delta) = 0 \text{ for struct. }\Delta\\} \\] +Clearly, \\(\mu(M)\\) depends not only on \\(M\\) but also on the **allowed structure** for \\(\Delta\\). This is sometimes shown explicitly by using the notation \\(\mu\_\Delta (M)\\).
+ +The above definition of \\(\mu\\) involves varying \\(\maxsv(\Delta)\\). However, we prefer to normalize \\(\Delta\\) such that \\(\maxsv(\Delta)\le1\\). We can do that by scaling \\(\Delta\\) by a factor \\(k\_m\\), and looking for the smallest \\(k\_m\\) which makes the matrix \\(I - k\_m M \Delta\\) singular. \\(\mu\\) is then the reciprocal of this small \\(k\_m\\): \\(\mu = 1/k\_m\\). This results in the following alternative definition of \\(\mu\\). + +
+
+ +Let \\(M\\) be a given complex matrix and let \\(\Delta = \diag{\Delta\_i}\\) denote a set of complex matrices with \\(\maxsv(\Delta) \le 1\\) and with a given block-diagonal structure. +The real non-negative function \\(\mu(M)\\), called the structured singular value, is defined by + +\begin{align\*} + \mu(M) \triangleq &(\min\\{ k\_m | \det(I - k\_m M \Delta) = 0\\\\\\ + &\text{for structured } \Delta, \maxsv(\Delta) \le 1 \\} )^{-1} +\end{align\*} + +If no such structured \\(\Delta\\) exists then \\(\mu(M) = 0\\) + +
+ +A value of \\(\mu = 1\\) means that there exists a perturbation with \\(\maxsv(\Delta) = 1\\) which is just large enough to make \\(I - M\Delta\\) singular. + +A larger value of \\(\mu\\) is "bad" as it means that a smaller perturbation makes \\(I - M\Delta\\) singular, whereas a smaller value of \\(\mu\\) is "good". + + +#### Remarks on the Definition of \\(\mu\\) {#remarks-on-the-definition-of--mu} + +1. The structured singular value was introduced by Doyle while at the same time, Safonov introduced the **Multivariable Stability Margin** \\(k\_m\\) for a diagonally perturbed system as the inverse of \\(\mu\\), that is \\(k\_m(M) = \mu(M)^{-1}\\). +2. Note that with \\(k\_m = 0\\) we obtain \\(I - k\_m M \Delta = I\\) which is clearly non-singular. + Thus, one possible way to obtain \\(\mu\\) numerically, is to start with \\(k\_m = 0\\), and gradually increase \\(k\_m\\) until we first find an allowed \\(\Delta\\) with \\(\maxsv(\Delta) = 1\\) such that \\(I-k\_mM\Delta\\) is singular. + + +#### Properties of \\(\mu\\) for Real and Complex \\(\Delta\\) {#properties-of--mu--for-real-and-complex--delta} + +1. \\(\mu(\alpha M) = \abs{\alpha} \mu(M)\\) for any real scalar \\(\alpha\\) +2. Let \\(\Delta = \diag{\Delta\_1, \Delta\_2}\\) be a block-diagonal perturbation and let \\(M\\) be partitioned accordingly. + Then \\[ \mu\_\Delta \ge \text{max} \\{\mu\_{\Delta\_1} (M\_{11}), \mu\_{\Delta\_2}(M\_{22}) \\} \\] + + +#### Properties of \\(\mu\\) for Complex Perturbations \\(\Delta\\) {#properties-of--mu--for-complex-perturbations--delta} + +1. For complex perturbations \\(\Delta\\) with \\(\maxsv(\Delta) \le 1\\) + + \begin{equation} + \tcmbox{\mu(M) = \max\_{\Delta, \maxsv(\Delta) \le 1} \rho(M\Delta)} + \end{equation} +2. \\(\mu(\alpha M) = \abs{\alpha} \mu(M)\\) for any (complex) scalar \\(\alpha\\) +3. For a full block complex perturbation \\(\Delta\\) + \\[ \mu(M) = \maxsv(M) \\] +4. 
\\(\mu\\) for complex perturbations is bounded by the spectral radius and the singular value + + \begin{equation} + \tcmbox{\rho(M) \le \mu(M) \le \maxsv(M)} + \end{equation} +5. **Improved lower bound**. + Defined \\(\mathcal{U}\\) as the set of all unitary matrices \\(U\\) with the same block diagonal structure as \\(\Delta\\). + Then for complex \\(\Delta\\) + + \begin{equation} + \tcmbox{\mu(M) = \max\_{U\in\mathcal{U}} \rho(MU)} + \end{equation} +6. **Improved upper bound**. + Defined \\(\mathcal{D}\\) as the set of all unitary matrices \\(D\\) that commute with \\(\Delta\\). + Then + + \begin{equation} + \tcmbox{\mu(M) = \min\_{D\in\mathcal{D}} \maxsv(DMD^{-1})} + \end{equation} + + +### Robust Stability with Structured Uncertainty {#robust-stability-with-structured-uncertainty} + +Consider stability of the \\(M\Delta\text{-structure}\\) for the case where \\(\Delta\\) is a set of norm-bounded block-diagonal perturbations. +From the determinant stability condition which applies to both complex and real perturbations, we get +\\[ \text{RS} \ \Leftrightarrow \ \det(I-M\Delta(j\w)) \ne 0, \ \forall\w,\, \forall\Delta, \, \\|\Delta\\|\_\infty \le 1 \\] +The problem is that this is only a "yes/no" condition. To find the factor \\(k\_m\\) by which the system is robustly stable, we scale the uncertainty \\(\Delta\\) by \\(k\_m\\), and look for the smallest \\(k\_m\\) which yields "borderline instability", namely +\\[ \det(I - k\_m M \Delta) = 0 \\] +From the definition of \\(\mu\\), this value is \\(k\_m = 1/\mu(M)\\), and we obtain the following necessary and sufficient condition for robust stability. + +
+
+ +Assume that the nominal system \\(M\\) and the perturbations \\(\Delta\\) are stable. +Then the \\(M\Delta\text{-system}\\) is stable for all allowed perturbations with \\(\maxsv(\Delta)\le 1, \ \forall\w\\) if on only if + +\begin{equation} + \mu(M(j\w)) < 1, \ \forall \omega +\end{equation} + +
+ + +##### What do \\(\mu \ne 1\\) and skewed-\\(\mu\\) mean? {#what-do--mu-ne-1--and-skewed--mu--mean} + +A value of \\(\mu = 1.1\\) for robust stability means that **all** the uncertainty blocks must be decreased in magnitude by a factor 1.1 in order to guarantee stability. + +But if we want to keep some of the uncertainty blocks fixed, how large can one particular source of uncertainty be before we get instability? +We define this value as \\(1/\mu^s\\), where \\(\mu^s\\) is called skewed-\\(\mu\\). We may view \\(\mu^s(M)\\) as a generalization of \\(\mu(M)\\). + +
+
+ +Let \\(\Delta = \diag{\Delta\_1, \Delta\_2}\\) and assume we have fixed \\(\norm{\Delta\_1} \le 1\\) and we want to find how large \\(\Delta\_2\\) can be before we get instability. +The solution is to select +\\[ K\_m = \begin{bmatrix}I & 0 \\ 0 & k\_m I\end{bmatrix} \\] +and look at each frequency for the smallest value of \\(k\_m\\) which makes \\(\det(I - K\_m M \Delta) = 0\\) and we have that skewed-\\(\mu\\) is +\\[ \mu^s(M) \triangleq 1/k\_m \\] + +
+ +Note that to compute skewed-\\(\mu\\) we must first define which part of the perturbations is to be constant. + + +### Robust Performance {#robust-performance} + + +#### Testing RP using \\(\mu\\) {#testing-rp-using--mu} + +To test for RP, we first "pull out" the uncertain perturbations and rearrange the uncertain system into the \\(N\Delta\text{-form}\\). +Our RP-requirement, is that the \\(\hinf\\) norm of the transfer function \\(F = F\_u(N, \Delta)\\) remains less than \\(1\\) for all allowed perturbations. +This may be tested exactly by computing \\(\mu(N)\\). + +
+
+ +Rearrange the uncertain system into the \\(N\Delta\text{-structure}\\). Assume nominal stability such that \\(N\\) is stable. +Then + +\begin{align\*} +\text{RP} \ &\stackrel{\text{def}}{\Longleftrightarrow} \ \hnorm{F} = \hnorm{F\_u(N, \Delta)} < 1, \ \forall \hnorm{\Delta} < 1 \\\\\\ + &\Longleftrightarrow \ \mu\_{\hat{\Delta}}(N(j\w)) < 1, \ \forall\w +\end{align\*} + +where \\(\mu\\) is computed with respect to the structure +\\[ \hat{\Delta} = \begin{bmatrix}\Delta & 0 \\ 0 & \Delta\_P\end{bmatrix} \\] +and \\(\Delta\_P\\) is a full complex perturbation with the same dimensions as \\(F^T\\). + +
+ +Some remarks on the theorem: + +1. Condition \\(\mu\_{\hat{\Delta}}(N(j\w)) < 1, \ \forall\w\\) allows us to test if \\(\hnorm{F} < 1\\) for all possible \\(\Delta\\) without having to test each \\(\Delta\\) individually. Essential, \\(\mu\\) is defined such that it directly addresses the worst case +2. The \\(\mu\text{-condition}\\) for RP involves the enlarged perturbation \\(\hat{\Delta} = \diag{\Delta, \Delta\_P}\\). + Here \\(\Delta\\), which itself may be a block diagonal matrix, represents the true uncertainty, whereas \\(\Delta\_P\\) is a full complex matrix stemming from the \\(\hinf\\) norm performance specification +3. Since \\(\hat{\Delta}\\) always has structure, the use of \\(\hinf\\) norm, \\(\hnorm{N} < 1\\), is generally conservative for robust performance + + +#### Summary of \\(\mu\text{-conditions}\\) for NP, RS and RP {#summary-of--mu-text-conditions--for-np-rs-and-rp} + +
+
+ +Rearrange the uncertain system into the \\(N\Delta\text{-structure}\\) where the block-diagonal perturbation satisfy \\(\hnorm{\Delta} \le 1\\). +Introduce +\\[ F = F\_u(N, \Delta) = N\_{22} + N\_{21}\Delta(I - N\_{11} \Delta)^{-1} N\_{12} \\] +Let the performance requirement be \\(\hnorm{F} \le 1\\). + +\begin{align\*} + \text{NS} \ &\Leftrightarrow \ N \text{ (internally) stable} \\\\\\ + \text{NP} \ &\Leftrightarrow \ \text{NS and } \maxsv(N\_{22}) = \mu\_{\Delta\_P} < 1, \ \forall\w \\\\\\ + \text{RS} \ &\Leftrightarrow \ \text{NS and } \mu\_\Delta(N\_{11}) < 1, \ \forall\w \\\\\\ + \text{RP} \ &\Leftrightarrow \ \text{NS and } \mu\_{\tilde{\Delta}}(N) < 1, \ \forall\w, \ \tilde{\Delta} = \begin{bmatrix}\Delta & 0 \\ 0 & \Delta\_P\end{bmatrix} +\end{align\*} + +
+ +Here \\(\Delta\\) is a block-diagonal matrix, whereas \\(\Delta\_P\\) is always a full complex matrix. + +Although the structured singular value is not a norm, it is sometimes convenient to refer to the peak \\(\mu\text{-value}\\) as the "\\(\Delta\text{-norm}\\)". +For a stable rational transfer matrix \\(H(s)\\), with an associated block structure \\(\Delta\\), we therefore define + +\begin{equation} + \tcmbox{\left\\|H(s)\right\\|\_\Delta \triangleq \max\_{\w} \mu\_\Delta (H(j\w))} +\end{equation} + +For a nominal stable system, we then have + +\begin{align\*} + \text{NP} \ &\Leftrightarrow \ \hnorm{N\_{22}} < 1 \\\\\\ + \text{RS} \ &\Leftrightarrow \ \left\\|N\_{11}\right\\|\_\Delta < 1 \\\\\\ + \text{RP} \ &\Leftrightarrow \ \left\\|N\right\\|\_{\tilde{\Delta}} < 1 +\end{align\*} + + +#### Worst-case Performance and Skewed-\\(\mu\\) {#worst-case-performance-and-skewed--mu} + +Assume we have a system for which the peak \\(\mu\text{-value}\\) for RP is \\(1.1\\). What does this mean? +The definition of \\(\mu\\) tells us that our RP-requirement would be satisfied exactly if we reduced **both** the performance requirement and the uncertainty by a factor of \\(1.1\\). +So \\(\mu\\) does not directly give us the worst-case performance \\(\max\_{\Delta} \maxsv(F(\Delta))\\). + +To find the worst-case weighted performance for a given uncertainty, one needs to keep the magnitude of the perturbation fixed (\\(\maxsv(\Delta) \le 1\\)), that is, **we must compute the skewed-\\(\mu\\)** of \\(N\\). +We have, in this case +\\[ \max\_{\maxsv(\Delta) \le 1} \maxsv(F\_l(N, \Delta)(j\w)) = \mu^s (N(j\w)) \\] + +To find \\(\mu^s\\) numerically, we scale the performance part of \\(N\\) by a factor \\(k\_m = 1/\mu^s\\) and iterate on \\(k\_m\\) until \\(\mu = 1\\). 
+That is, at each frequency skewed-\\(\mu\\) is the value \\(\mu^s(N)\\) which solves +\\[ \mu(K\_mN) = 1, \quad K\_m = \begin{bmatrix}I & 0 \\ 0 & 1/\mu^s\end{bmatrix} \\] +Note that \\(\mu\\) underestimate how bad or good the actual worst case performance is. This follows because \\(\mu^s(N)\\) is always further from 1 than \\(\mu(N)\\). + + +### Application: RP with Input Uncertainty {#application-rp-with-input-uncertainty} + +We will now consider in some detail the case of multiplicative input uncertainty with performance defined in terms of weighted sensitivity (Fig. [fig:input_uncertainty_set_feedback_weight](#fig:input_uncertainty_set_feedback_weight)). + +The performance requirement is then +\\[ \text{RP} \quad \stackrel{\text{def}}{\Longleftrightarrow} \quad \hnorm{w\_P (I + G\_p K)^{-1}} < 1, \quad \forall G\_p \\] +where the set of plant is given by +\\[ G\_p = G (I + w\_I \Delta\_I), \quad \hnorm{\Delta\_I} \le 1 \\] + +Here \\(w\_p(s)\\) and \\(w\_I(s)\\) are scalar weights, so the performance objective is the same for all the outputs, and the uncertainty is the same for all the inputs. + +In this section, we will: + +1. Find the interconnection matrix \\(N\\) for this problem +2. Consider the SISO case, so that useful connections can be made with results for SISO systems +3. Consider a multivariable distillation process +4. Find some simple bounds on \\(\mu\\) and discuss the role of the condition number +5. Make comparisons with the case where the uncertainty is located at the output + + +#### Interconnection Matrix {#interconnection-matrix} + +On rearranging the system into the \\(N\Delta\text{-structure}\\), we get + +\begin{equation} + N = \begin{bmatrix} - w\_I T\_I & - w\_I K S \\ w\_p S G & w\_p S \end{bmatrix} +\end{equation} + +where \\(T\_I = KG(I + KG)^{-1}\\), \\(S = (I + GK)^{-1}\\). +For simplicity, we can omit the negative signs. + +For a given controller \\(K\\) we can now test for NS, NP, RS and RP. 
+
+
+#### RP with Input Uncertainty for SISO System {#rp-with-input-uncertainty-for-siso-system}
+
+For a SISO system with \\(N\\) as described above:
+
+\begin{align\*}
+  \text{NS} &\Leftrightarrow S,\ SG,\ KS, \text{ and } T\_I \text{ are stable} \\\\\\
+  \text{NP} &\Leftrightarrow |w\_P S| < 1, \quad \forall \omega \\\\\\
+  \text{RS} &\Leftrightarrow |w\_I T\_I| < 1, \quad \forall \omega \\\\\\
+  \text{RP} &\Leftrightarrow |w\_P S| + |w\_I T\_I| < 1, \quad \forall \omega
+\end{align\*}

+Robust performance optimization, in terms of weighted sensitivity with multiplicative uncertainty for a SISO system, thus involves minimizing the peak value of \\(\mu(N) = |w\_I T| + |w\_P S|\\).
+This may be solved using DK-iteration.
+A closely related problem, which is easier to solve, is to minimize the peak value (\\(\mathcal{H}\_\infty\\) norm) of the mixed sensitivity matrix:
+\\[ N\_\text{mix} = \begin{bmatrix} w\_P S \\ w\_I T \end{bmatrix} \\]
+
+At each frequency, \\(\mu(N)\\) differs from \\(\bar{\sigma}(N\_\text{mix})\\) by at most a factor \\(\sqrt{2}\\).
+Thus, minimizing \\(\\| N\_\text{mix} \\|\_\infty\\) is close to optimizing robust performance in terms of \\(\mu(N)\\).
+
+
+#### Robust Performance for \\(2 \times 2\\) Distillation Process {#robust-performance-for--2-times-2--distillation-process}
+
+Consider a distillation process and a corresponding inverse-based controller:
+\\[ G(s) = \frac{1}{75s + 1} \begin{bmatrix} 87.8 & -86.4 \\ 108.2 & -109.6 \end{bmatrix} ; \quad K(s) = \frac{0.7}{s} G(s)^{-1} \\]
+
+The controller provides a nominally decoupled system:
+\\[ L = l I,\ S = \epsilon I \text{ and } T = t I \\]
+where
+\\[ l = \frac{0.7}{s}, \ \epsilon = \frac{s}{s + 0.7}, \ t = \frac{0.7}{s + 0.7} \\]
+
+The following weights for uncertainty and performance are used:
+\\[ w\_I(s) = \frac{s + 0.2}{0.5s + 1}; \quad w\_P(s) = \frac{s/2 + 0.05}{s} \\]
+
+We now test for NS, NP, RS and RP.
+ + +##### NS {#ns} + +with \\(G\\) and \\(K\\) as defined, we find that \\(S\\), \\(SG\\), \\(KS\\) and \\(T\_I\\) are stable, so the system is nominally stable. + + +##### NP {#np} + +with the decoupling controller we have: +\\[ \bar{\sigma}(N\_{22}) = \bar{\sigma}(w\_P S) = \left|\frac{s/2 + 0.05}{s + 0.7}\right| \\] +and we see from Fig. [fig:mu_plots_distillation](#fig:mu_plots_distillation) that the NP-condition is satisfied. + + + +{{< figure src="/ox-hugo/skogestad07_mu_plots_distillation.png" caption="Figure 32: \\(\mu\text{-plots}\\) for distillation process with decoupling controller" >}} + + +##### RS {#rs} + +In this case \\(w\_I T\_I = w\_I T\\) is a scalar times the identity matrix: +\\[ \mu\_{\Delta\_I}(w\_I T\_I) = |w\_I t| = \left|0.2 \frac{5s + 1}{(0.5s + 1)(1.43s + 1)}\right| \\] +and we see from Fig. [fig:mu_plots_distillation](#fig:mu_plots_distillation) that RS is satisfied. + +The peak value of \\(\mu\_{\Delta\_I}(M)\\) is \\(0.53\\) meaning that we may increase the uncertainty by a factor of \\(1/0.53 = 1.89\\) before the worst case uncertainty yields instability. + + +##### RP {#rp} + +Although the system has good robustness margins and excellent nominal performance, the robust performance is poor. +This is shown in Fig. [fig:mu_plots_distillation](#fig:mu_plots_distillation) where the \\(\mu\text{-curve}\\) for RP was computed numerically using \\(\mu\_{\hat{\Delta}}(N)\\), with \\(\hat{\Delta} = \text{diag}\\{\Delta\_I, \Delta\_P\\}\\) and \\(\Delta\_I = \text{diag}\\{\delta\_1, \delta\_2\\}\\). +The peak value is close to 6, meaning that even with 6 times less uncertainty, the weighted sensitivity will be about 6 times larger than what we require. + + +#### Robust Performance and the Condition Number {#robust-performance-and-the-condition-number} + +We here consider the relationship between \\(\mu\\) for RP and the condition number of the plant or of the controller. +We consider unstructured multiplicative uncertainty (i.e. 
\\(\Delta\_I\\) is a full matrix) and performance is measured in terms of the weighted sensitivity. +With \\(N\\) given by [eq:n_delta_structure_clasic](#eq:n_delta_structure_clasic), we have: +\\[ \overbrace{\mu\_{\tilde{\Delta}}(N)}^{\text{RP}} \le [ \overbrace{\bar{\sigma}(w\_I T\_I)}^{\text{RS}} + \overbrace{\bar{\sigma}(w\_P S)}^{\text{NP}} ] (1 + \sqrt{k}) \\] +where \\(k\\) is taken as the smallest value between the condition number of the plant and of the controller: +\\[ k = \text{min}(\gamma(G), \gamma(K)) \\] + +We see that with a "round" controller (i.e. one with \\(\gamma(K) = 1\\)), there is less sensitivity to uncertainty. +On the other hand, we would expect \\(\mu\\) for RP to be large if we used an inverse-based controller for a plant with large condition number, since then \\(\gamma(K) = \gamma(G)\\) is large. + + +#### Comparison with Output Uncertainty {#comparison-with-output-uncertainty} + +Consider output multiplicative uncertainty of magnitude \\(w\_O(j\omega)\\). +In this case, we get the interconnection matrix +\\[ N = \begin{bmatrix} w\_O T & w\_O T \\ w\_P S & w\_P S \end{bmatrix} \\] +and for any structure of the uncertainty, \\(\mu(N)\\) is bounded as follows: +\\[ \bar{\sigma}\begin{bmatrix} w\_O T \\ w\_P S\end{bmatrix} \le \overbrace{\mu(N)}^{\text{RP}} \le \sqrt{2}\ \bar{\sigma} \overbrace{\underbrace{\begin{bmatrix} w\_O T \\ w\_P S \end{bmatrix}}\_{\text{NP}}}^{\text{RS}} \\] +This follows since the uncertainty and performance blocks both enter at the output and that the difference between bounding the combined perturbations \\(\bar{\sigma}[\Delta\_O \ \Delta\_P]\\) and the individual perturbations \\(\bar{\sigma}(\Delta\_O)\\) and \\(\bar{\sigma}(\Delta\_P)\\) is at most a factor \\(\sqrt{2}\\). +Thus, we "automatically" achieve RP if we satisfy separately NP and RS. +Multiplicative output uncertainty then poses no particular problem for performance. 
+ + +### \\(\mu\text{-synthesis}\\) and DK-iteration {#mu-text-synthesis--and-dk-iteration} + +The structured singular value \\(\mu\\) is a very powerful tool for the analysis of robust performance with a given controller. However, one may also seek to **find the controller that minimizes** a given \\(\mu\text{-condition}\\): this is the \\(\mu\text{-synthesis}\\) problem. + + +#### DK-iteration {#dk-iteration} + +At present, there is no direct method to synthesize a \\(\mu\text{-optimal}\\) controller. +However, for complex perturbations, a method known as **DK-iteration** is available. +It combines \\(\hinf\\) synthesis and \\(\mu\text{-analysis}\\) and often yields good results.
+ +The starting point is the upper bound on \\(\mu\\) in terms of the scaled singular value + +\begin{equation} + \mu(N) \le \min\_{D \in \mathcal{D}} \maxsv(D N D^{-1}) +\end{equation} + +The idea is to find the controller that minimizes the peak value over frequency of this upper bound, namely + +\begin{equation} + \min\_{K} \left( \min\_{D \in \mathcal{D}} \hnorm{D N(K) D^{-1} } \right) +\end{equation} + +by alternating between minimizing \\(\hnorm{DN(K)D^{-1}}\\) with respect to either \\(K\\) or \\(D\\) (while holding the other fixed).
+ +To start the iterations, one selects an initial stable rational transfer matrix \\(D(s)\\) with appropriate structure. +The identity matrix is often a good initial choice for \\(D\\) provided the system has been reasonably scaled for performance.
+ +
+
+ +1. **K-step**. Synthesize an \\(\hinf\\) controller for the scaled problem, \\(\min\_{K} \hnorm{DN(K)D^{-1}}\\) with fixed \\(D(s)\\) +2. **D-step**. Find \\(D(j\w)\\) to minimize at each frequency \\(\maxsv(DND^{-1}(j\w))\\) with fixed \\(N\\) +3. Fit the magnitude of each element of \\(D(j\w)\\) to a stable and minimum phase transfer function \\(D(s)\\) and go to step 1 + +
+
+The iteration may continue until satisfactory performance is achieved, \\(\hnorm{DND^{-1}} < 1\\) or until the \\(\hinf\\) norm no longer decreases.
+One fundamental problem with this approach is that although each of the minimization steps is convex, **joint convexity is not guaranteed**.
+Therefore, the iterations may converge to a **local minimum**.
+
+The order of the controller resulting from each iteration is equal to the number of states in the plant \\(G(s)\\) plus the number of states in the weights plus twice the number of states in \\(D(s)\\).
+The obtained \\(\mu\text{-optimal}\\) controller will usually be of **high order** and will have a flat \\(\mu\text{-curve}\\) until some high frequency.
+
+The DK-iteration depends heavily on optimal solutions for steps 1 and 2, and also on good fits in step 3.
+We usually **prefer to have a low-order fit** in step 3 as it will reduce the order of the \\(\hinf\\) problem which usually improves the numerical properties of the optimization.
+In some cases, the iterations converge slowly and the \\(\mu\text{-value}\\) can even increase. This may be caused by numerical problems and one may consider going back to the initial problem and **rescaling the inputs and outputs**.
+
+
+#### Adjusting the Performance Weight {#adjusting-the-performance-weight}
+
+If \\(\mu\\) at a given frequency is different from 1, then the interpretation is that at this frequency we can tolerate \\(1/\mu\\) times more uncertainty and still satisfy our performance objective with a margin of \\(1/\mu\\).
+In \\(\mu\text{-synthesis}\\), the designer will usually adjust some parameter in the performance or uncertainty weights until the peak \\(\mu\text{-value}\\) is close to 1.
+
+Sometimes, uncertainty is fixed and we effectively optimize worst-case performance by adjusting a parameter in the performance weight.
+Consider the performance weight
+\\[ w\_p(s) = \frac{s/M + \w\_B^\*}{s + \w\_B^\* A} \\]
+where we want to keep \\(M\\) constant and find the highest achievable bandwidth frequency \\(\w\_B^\*\\).
+The optimization problem becomes
+\\[ \text{max} \abs{\w\_B^\*} \quad \text{such that} \quad \mu(N) < 1, \ \forall\w \\]
+where \\(N\\), the interconnection matrix for the RP-problem, depends on \\(\w\_B^\*\\). This may be implemented as an **outer loop around the DK-iteration**.
+
+
+#### Fixed Structure Controller {#fixed-structure-controller}
+
+Sometimes it is desirable to find a low-order controller with a given structure.
+This may be achievable by numerical optimization where \\(\mu\\) is minimized with respect to the controller parameters.
+The problem here is that the optimization is not generally convex in the parameters.
+Sometimes it helps to switch the optimization between minimizing the peak of \\(\mu\\) and minimizing the integral square deviation of \\(\mu\\) away from \\(k\\) (i.e. \\(\normtwo{\mu(j\w) - k}\\)) where \\(k\\) is usually close to 1.
+The latter is an attempt to "flatten out" \\(\mu\\).
+
+
+#### Example: \\(\mu\text{-synthesis}\\) with DK-iteration {#example--mu-text-synthesis--with-dk-iteration}
+
+For simplicity, we will consider again the case of multiplicative uncertainty and performance defined in terms of weighted sensitivity.
+The uncertainty weight \\(w\_I I\\) and performance weight \\(w\_P I\\) are shown graphically in Fig. [fig:weights_distillation](#fig:weights_distillation).
+
+
+
+{{< figure src="/ox-hugo/skogestad07_weights_distillation.png" caption="Figure 33: Uncertainty and performance weights" >}}
+
+The objective is to minimize the peak value of \\(\mu\_{\tilde{\Delta}}(N)\\), \\(\tilde{\Delta} = \text{diag}\\{\Delta\_I, \Delta\_P\\}\\).
+\\(\Delta\_I\\) is a diagonal \\(2 \times 2\\) matrix representing the diagonal input uncertainty and \\(\Delta\_P\\) is a full \\(2 \times 2\\) matrix representing the performance specifications.
+
+First, the generalized plant \\(P\\) is constructed which includes the plant model, the uncertainty weight and the performance weight.
+Then the block structure is defined; it consists of two \\(1 \times 1\\) blocks to represent \\(\Delta\_I\\) and a \\(2 \times 2\\) block to represent \\(\Delta\_P\\).
+The scaling matrix \\(D\\) for \\(DND^{-1}\\) then has the structure \\(D = \text{diag}\\{d\_1, d\_2, d\_3I\_2\\}\\). We select \\(d\_3 = 1\\) and as initial scalings we select \\(d\_1^0 = d\_2^0 = 1\\).
+\\(P\\) is then scaled with the matrix \\(\text{diag}\\{D, I\_2\\}\\) where \\(I\_2\\) is associated with the inputs and outputs from the controller (we do not want to scale the controller).
+
+- Iteration No. 1.
+  Step 1: with the initial scalings, the \\(\mathcal{H}\_\infty\\) synthesis produced a 6 state controller (2 states from the plant model and 2 from each of the weights).
+  Step 2: the upper \\(\mu\text{-bound}\\) is shown in Fig. [fig:dk_iter_mu](#fig:dk_iter_mu).
+  Step 3: the frequency dependent \\(d\_1(\omega)\\) and \\(d\_2(\omega)\\) from step 2 are fitted using a 4th order transfer function shown in Fig. [fig:dk_iter_d_scale](#fig:dk_iter_d_scale)
+- Iteration No. 2.
+  Step 1: with the 8 state scalings \\(D^1(s)\\), the \\(\mathcal{H}\_\infty\\) synthesis gives a 22 state controller.
+  Step 2: This controller gives a peak value of \\(\mu\\) of \\(1.02\\).
+  Step 3: the scalings are only slightly changed
+- Iteration No. 3.
+  Step 1: The \\(\mathcal{H}\_\infty\\) norm is only slightly reduced. We thus decide to stop the iterations.
+
+
+
+{{< figure src="/ox-hugo/skogestad07_dk_iter_mu.png" caption="Figure 34: Change in \\(\mu\\) during DK-iteration" >}}
+
+
+
+{{< figure src="/ox-hugo/skogestad07_dk_iter_d_scale.png" caption="Figure 35: Change in D-scale \\(d\_1\\) during DK-iteration" >}}
+
+The final \\(\mu\text{-curves}\\) for NP, RS and RP with the controller \\(K\_3\\) are shown in Fig. [fig:mu_plot_optimal_k3](#fig:mu_plot_optimal_k3).
+The objectives of RS and NP are easily satisfied.
+The peak value of \\(\mu\\) is just slightly over 1, so the performance specification \\(\bar{\sigma}(w\_P S\_p) < 1\\) is almost satisfied for all possible plants.
+
+
+
+{{< figure src="/ox-hugo/skogestad07_mu_plot_optimal_k3.png" caption="Figure 36: \\(\mu\text{-plots}\\) with \\(\mu\\) \"optimal\" controller \\(K\_3\\)" >}}
+
+To confirm that, 6 perturbed plants are used to compute the perturbed sensitivity functions shown in Fig. [fig:perturb_s_k3](#fig:perturb_s_k3).
+
+
+
+{{< figure src="/ox-hugo/skogestad07_perturb_s_k3.png" caption="Figure 37: Perturbed sensitivity functions \\(\bar{\sigma}(S^\prime)\\) using \\(\mu\\) \"optimal\" controller \\(K\_3\\). Lower solid line: nominal plant. Upper solid line: worst-case plant" >}}
+
+
+### Further Remarks on \\(\mu\\) {#further-remarks-on--mu}
+
+For complex perturbations, the scaled singular value \\(\maxsv(DND^{-1})\\) is a tight upper bound on \\(\mu(N)\\) in most cases, and minimizing the upper bound \\(\hnorm{DND^{-1}}\\) forms the basis for the DK-iteration.
+
+The use of constant D-scales (\\(D\\) is not allowed to vary with frequency) provides a necessary and sufficient condition for robustness to arbitrarily fast time varying linear uncertainty. While such perturbations are unlikely in a practical situation, we know that this controller will work very well even for rapid changes in the plant. Moreover, the use of constant D-scales makes the computation of \\(\mu\\) straightforward and solvable using LMIs.
+ + +### Conclusion {#conclusion} + +We have discussed how to represent uncertainty and how to analyze its effect on stability (RS) and performance (RP) using the **structured singular value** \\(\mu\\).
+
+To analyze robust stability of an uncertain system, we make use of the \\(M\Delta\text{-structure}\\) where \\(M\\) represents the transfer function for the "new" feedback part generated by the uncertainty.
+From the small gain theorem
+\\[ \tcmbox{RS \quad \Leftarrow \quad \maxsv(M) < 1, \ \forall\w} \\]
+which is tight (necessary and sufficient) for the special case where at each frequency any complex \\(\Delta\\) satisfying \\(\maxsv(\Delta) \le 1\\) is allowed.
+More generally, the **tight condition is**
+\\[ \tcmbox{RS \quad \Leftrightarrow \quad \mu(M) < 1, \ \forall\w} \\]
+where \\(\mu(M)\\) is the **structured singular value**. The calculation of \\(\mu\\) makes use of the fact that \\(\Delta\\) has a given block-diagonal structure, where certain blocks may also be real (e.g. to handle parametric uncertainty).
+
+We defined robust performance as \\(\hnorm{F\_l(N, \Delta)} < 1\\) for all allowed \\(\Delta\\).
+Since we used the \\(\hinf\\) norm in both the representation of uncertainty and the definition of performance, we found that RP could be viewed as a special case of RS, and we derived
+\\[ \tcmbox{RP \quad \Leftrightarrow \quad \mu(N) < 1, \ \forall\w} \\]
+where \\(\mu\\) is computed with respect to the **block-diagonal structure** \\(\diag{\Delta, \Delta\_P}\\).
+Here \\(\Delta\\) represents the uncertainty and \\(\Delta\_P\\) is a fictitious full uncertainty block representing the \\(\hinf\\) performance bound.
+
+There are **two main approaches to getting a robust design**:
+
+1. We aim to make the system robust to some **"general" class of uncertainty** which we do not explicitly model.
+   For SISO systems, the classical gain and phase margins and the peaks of \\(S\\) and \\(T\\) provide useful robustness measures.
+   For MIMO systems, normalized coprime factor uncertainty provides a good general class of uncertainty, and the associated Glover-McFarlane \\(\hinf\\) loop-shaping design procedure has proved itself very useful in applications
+2. We **explicitly model and quantify the uncertainty** in the plant and aim to make the system robust to this specific uncertainty.
+   Potentially, it yields better designs, but it may require a much larger effort in terms of uncertainty modelling, especially if parametric uncertainty is considered.
+   Analysis and, in particular, synthesis using \\(\mu\\) can be very involved
+
+In applications, it is therefore recommended to **start with the first approach**, at least for design.
+The robust stability and performance are then analyzed in simulations and using the structured singular value, for example, by considering first simple sources of uncertainty such as multiplicative input uncertainty.
+One then iterates between design and analysis until a satisfactory solution is obtained.
+If resulting control performance is not satisfactory, one may switch to the second approach.
+
+
+##### Practical \\(\mu\text{-synthesis}\\) in practice: {#practical--mu-text-synthesis--in-practice}
+
+1. Because of the effort involved in deriving detailed uncertainty descriptions, and the subsequent complexity in synthesizing controllers, the rule is to **start simple** with a crude uncertainty description, and then to see whether the performance specifications can be met. Only if they can't, one should consider more detailed uncertainty descriptions such as parametric uncertainty
+2. 
The use of \\(\mu\\) implies a worst-case analysis, so one should be **careful about including too many sources of uncertainty**, noise and disturbances - otherwise it becomes very unlikely for the worst case to occur, and the resulting analysis and design may be **unnecessarily conservative**
+3. There is always uncertainty with respect to the inputs and outputs, so **it is generally safe to include diagonal input and output uncertainty**. The relative multiplicative form is very convenient in this case
+4. \\(\mu\\) is most commonly used for analysis. If \\(\mu\\) is used for synthesis, then we recommend that you keep the uncertainty fixed and adjust the parameters in the performance weight until \\(\mu\\) is close to 1
+
+
+## Controller Design {#controller-design}
+
+
+
+
+### Trade-offs in MIMO Feedback Design {#trade-offs-in-mimo-feedback-design}
+
+The shaping of multivariable transfer functions is based on the idea that a satisfactory definition of gain for a matrix transfer function is given by the **singular values**.
+By multivariable transfer function shaping, therefore, we mean the shaping of the singular values of appropriately specified transfer functions such as the loop transfer function or one or more closed-loop transfer functions.
+ +The classical loop-shaping ideas can be further generalized to MIMO systems by considering the singular values. + +Consider the one degree-of-freedom system as shown in Fig. [fig:classical_feedback_small](#fig:classical_feedback_small). +We have the following important relationships: + +\begin{subequations} + \begin{align} + y(s) &= T(s) r(s) + S(s) d(s) - T(s) n(s) \\\\\\ + u(s) &= K(s) S(s) \big(r(s) - n(s) - d(s) \big) + \end{align} +\end{subequations} + + + +{{< figure src="/ox-hugo/skogestad07_classical_feedback_small.png" caption="Figure 38: One degree-of-freedom feedback configuration" >}} + +
+
+ +1. For disturbance rejection make \\(\maxsv(S)\\) small +2. For noise attenuation make \\(\maxsv(T)\\) small +3. For reference tracking make \\(\maxsv(T) \approx \minsv(T) \approx 1\\) +4. For control energy reduction make \\(\maxsv(KS)\\) small +5. For robust stability in presence of an additive perturbation (\\(G\_p = G + \Delta\\)) make \\(\maxsv(KS)\\) small +6. For robust stability in presence of a multiplicative output perturbation (\\(G\_p = (I + \Delta) G\\)) make \\(\maxsv(T)\\) small + +
+ +The closed-loop requirements cannot all be satisfied simultaneously. +Feedback design is therefore a **trade-off over frequency of conflicting objectives**. +This is not always as difficult as it sounds because the frequency range over which the objectives are important can be quite different.
+ +In classical loop shaping, it is the magnitude of the open-loop transfer function \\(L = GK\\) which is shaped, whereas the above requirements are all in terms of closed-loop transfer functions. +However, we have that +\\[ \minsv(L) - 1 \le \frac{1}{\maxsv(S)} \le \minsv(L) + 1 \\] +from which we see that \\(\maxsv(S) \approx 1/\minsv(L)\\) at frequencies where \\(\minsv(L)\\) is much larger than \\(1\\). +Furthermore, from \\(T = L(I+L)^{-1}\\) it follows that \\(\maxsv(T) \approx \maxsv(L)\\) at frequencies where \\(\maxsv(L)\\) is much smaller than \\(1\\). + +Thus, over specified frequency ranges, it is relatively easy to approximate the closed-loop requirements by open-loop objectives. + +
+
+ +1. For disturbance rejection make \\(\minsv(GK)\\) large +2. For noise attenuation make \\(\maxsv(GK)\\) small +3. For reference tracking make \\(\minsv(GK)\\) large +4. For control energy reduction make \\(\maxsv(K)\\) small +5. For robust stability in presence of an additive perturbation make \\(\maxsv(K)\\) small +6. For robust stability in presence of an multiplicative output perturbation make \\(\maxsv(GK)\\) small + +
+ +Typically, the open-loop requirements 1 and 3 are valid and important at low frequencies \\(0 \le \omega \le \omega\_l \le \omega\_B\\), while conditions 2, 4, 5 and 6 are conditions which are valid and important at high frequencies \\(\omega\_B \le \omega\_h \le \omega \le \infty\\), as illustrated in Fig. [fig:design_trade_off_mimo_gk](#fig:design_trade_off_mimo_gk). + + + +{{< figure src="/ox-hugo/skogestad07_design_trade_off_mimo_gk.png" caption="Figure 39: Design trade-offs for the multivariable loop transfer function \\(GK\\)" >}} + +The control engineer must design \\(K\\) such that \\(\minsv(GK)\\) lies above a performance boundary for all \\(\omega\\) up to \\(\omega\_l\\), and such that \\(\maxsv(GK)\\) lies below a robustness boundary for all \\(\omega\\) above \\(\omega\_h\\).
+
+Shaping the singular values of \\(GK\\) by selecting \\(K\\) is a relatively easy task, but to do this in a way which also guarantees closed-loop stability is in general difficult as **closed-loop stability cannot be determined from open-loop singular values**.
+ +For SISO systems, closed-loop stability is closely related to the open-loop roll-off rate from high to low gain at the crossover (which is in practice less than \\(\SI{40}{\decibel\per dec}\\)). +An immediate consequence of this is that there is a lower limit to the difference between \\(\omega\_h\\) and \\(\omega\_l\\). + +For MIMO systems, a similar gain/phase relationship holds in the crossover frequency region, but this is in terms of roll-off rate of the magnitude of the eigenvalues of \\(GK\\) and not the singular values. +The stability constraint is therefore more difficult to handle. + + +### LQG Control {#lqg-control} + +LQG control was developed and successfully applied for aerospace problems where accurate plants are available. +For other control problems, it was not easy, and the **assumption of white noise disturbance** is not always relevant. +As a result, LQG designs were sometimes not robust enough to be used in practice.
+ +It is assumed that the plant dynamics are linear and known, and that the measurement noise and disturbance signals are stochastic with known statistical properties: + +\begin{align\*} + \dot{x} &= A x + B u + w\_d \\\\\\ + y &= C x + D u + w\_n +\end{align\*} + +with \\(w\_d\\) and \\(w\_n\\) are the disturbance and measurement noise which are assumed to be uncorrelated zero-mean Gaussian stochastic processes with constant power spectral density matrices \\(W\\) and \\(V\\) respectively. + +
+
+ +The LQG control problem is to find the optimal control \\(u(t)\\) that minimize: +\\[J = E \bigg\\{ \lim\_{T\rightarrow \infty} \frac{1}{T} \int\_0^T \big[ x^T Q x + u^T R u \big] dt \bigg\\} \\] +Where \\(Q\\) and \\(R\\) are appropriately chosen constant **weighting matrices** (design parameters) such that \\(Q = Q^T \ge 0\\) and \\(R = R^T > 0\\). + +
+ +The solution to the LQG problem, known as the **Separation Theorem**, is separated into **two problems**. + +It consists of first determining the **optimal control** to a deterministic **LQR problem** (LQG without \\(w\_d\\) and \\(w\_n\\)). +The solution to this problem is a state feedback law + +\begin{equation} + \tcmbox{u(t) = -K\_r x(t)} +\end{equation} + +where \\(K\_r\\) is a **constant matrix** that can be easily computed.
+ +The next step is to find an **optimal estimate** \\(\hat{x}\\) of the state \\(x\\) so that \\(E \big\\{ [x-\hat{x}]^T [x-\hat{x}] \big\\}\\) is minimized. +The optimal state estimate is given by a **Kalman filter**. + +The solution to the LQG problem is then found by replacing \\(x\\) by \\(\hat{x}\\) to give \\(u(t) = -K\_r \hat{x}\\). + +We therefore see that the LQG problem and its solution can be separated into two distinct parts as illustrated in Fig. [fig:lqg_separation](#fig:lqg_separation): the optimal state feedback and the optimal state estimator (the Kalman filter). + + + +{{< figure src="/ox-hugo/skogestad07_lqg_separation.png" caption="Figure 40: The separation theorem" >}} + +
+
+ +The LQR problem, where all the states are known is to find the input signal \\(u(t)\\) that takes the system \\(\dot{x} = Ax+Bu\\) to the zero state (\\(x=0\\)) by minimizing the deterministic cost + +\begin{equation} + J\_r = \int\_0^\infty \big( x(t)^T Q x(t) + u(t)^T R u(t) \big) dt +\end{equation} + +The optimal solution is \\(u=-K\_r x(t)\\) with + +\begin{equation} + K\_r = R^{-1} B^T X +\end{equation} + +and \\(X\\) is the unique positive-semi definite solution of the algebraic Riccati equation: + +\begin{equation} + A^T X + X A - XBR^{-1}B^TX + Q = 0 +\end{equation} + +
+ +
+
+ +The Kalman filter has the structure of an ordinary state-estimator, as shown on Fig. [fig:lqg_kalman_filter](#fig:lqg_kalman_filter), with: + +\begin{equation} + \dot{\hat{x}} = A\hat{x} + Bu + K\_f(y-C\hat{x}) +\end{equation} + +The optimal choice of \\(K\_f\\), which minimize \\(E \big\\{ [x-\hat{x}]^T [x-\hat{x}] \big\\}\\) is given by + +\begin{equation} + K\_f = Y C^T V^{-1} +\end{equation} + +Where \\(Y\\) is the unique positive-semi definite solution of the algebraic Riccati equation + +\begin{equation} + YA^T + AY - YC^TV^{-1}CY + W = 0 +\end{equation} + +
+ + + +{{< figure src="/ox-hugo/skogestad07_lqg_kalman_filter.png" caption="Figure 41: The LQG controller and noisy plant" >}} + +The structure of the LQG controller is illustrated in Fig. [fig:lqg_kalman_filter](#fig:lqg_kalman_filter), its transfer function from \\(y\\) to \\(u\\) is given by + +\begin{align\*} + L\_{\text{LQG}}(s) &= \left[ \begin{array}{c|c} + A - B K\_r - K\_f C & K\_f \\ \hline + -K\_r & 0 + \end{array} \right] \\\\\\ + &= \left[ \begin{array}{c|c} + A - B R^{-1} B^T X - Y C^T V^{-1} C & Y C^T V^{-1} \\ \hline + -R^{-1} B^T X & 0 + \end{array} \right] +\end{align\*} + +It has the same degree (number of poles) as the plant.
+ +For the LQG-controller, as shown on Fig. [fig:lqg_kalman_filter](#fig:lqg_kalman_filter), it is not easy to see where to position the reference input \\(r\\) and how integral action may be included, if desired. Indeed, the standard LQG design procedure does not give a controller with integral action. One strategy is illustrated in Fig. [fig:lqg_integral](#fig:lqg_integral). Here, the control error \\(r-y\\) is integrated and the regulator \\(K\_r\\) is designed for the plant augmented with these integral states. + + + +{{< figure src="/ox-hugo/skogestad07_lqg_integral.png" caption="Figure 42: LQG controller with integral action and reference input" >}} + +For an LQG-controller system with a combined Kalman filter and LQR control law, there are **no guaranteed stability margins**, and there exist LQG combinations with arbitrarily small gain margins. +However, there are procedures for improving robustness properties of LQG control such as **Loop Transfer Recovery** (LTR). + +These procedures are somewhat difficult to use in practice. +Their main limitation is that they can only be applied to minimum phase plants. + + +### \\(\htwo\\) and \\(\hinf\\) Control {#htwo--and--hinf--control} + + + + +#### General Control Problem Formulation {#general-control-problem-formulation} + + +There are many ways in which feedback design problems can be cast as \\(\htwo\\) and \\(\hinf\\) optimization problems. +It is very useful therefore to have a **standard problem formulation** into which any particular problem may be manipulated. + +Such a general formulation is afforded by the general configuration shown in Fig. [fig:general_control](#fig:general_control). 
+ + + +{{< figure src="/ox-hugo/skogestad07_general_control.png" caption="Figure 43: General control configuration" >}} + +The system is described by + +\begin{subequations} + \begin{align} + \colvec{z\\v} &= P(s) \colvec{w\\u} = \begin{bmatrix}P\_{11}(s) & P\_{12}(s)\\ P\_{21}(s) & P\_{22}(s) \end{bmatrix} \colvec{w\\u}\\\\\\ + u &= K(s) v + \end{align} +\end{subequations} + +With a state space realization of the generalized plant \\(P\\) given by + +\begin{equation} + P = \left[ + \begin{array}{c|cc} + A & B\_1 & B\_2 \\\\\\ + \hline + C\_1 & D\_{11} & D\_{12} \\\\\\ + C\_2 & D\_{21} & D\_{22} + \end{array} + \right] +\end{equation} + +The closed loop transfer function from \\(w\\) to \\(z\\) is given by the linear fractional transformation: + +\begin{align\*} + z &= F\_l(P, K) w \\\\\\ + &= [P\_{11} + P\_{12}K(I-P\_{22}K)^{-1} P\_{21}] w +\end{align\*} + +\\(\htwo\\) and \\(\hinf\\) control involve the minimization of the \\(\htwo\\) and \\(\hinf\\) norms of \\(F\_l(P, K)\\).
+ +The most general and widely used algorithms for \\(\htwo\\) and \\(\hinf\\) control problems are based on the state space formulation and requires the solution of two Riccati equations. + +The following **assumptions** are typically made in \\(\htwo\\) and \\(\hinf\\) problems: + +1. \\((A, B\_2, C\_2)\\) is stabilizable and detectable. + This is required for the existence of stabilizing controllers +2. \\(D\_{12}\\) and \\(D\_{21}\\) have full rank. + This is sufficient to ensure that the controllers are proper +3. \\(\begin{bmatrix} A-j\w I & B\_2 \\ C\_1 & D\_{12} \end{bmatrix}\\) and \\(\begin{bmatrix} A-j\w I & B\_1 \\ C\_2 & D\_{21} \end{bmatrix}\\) have respectively full column and full row rank for all \\(\w\\). + This ensures that the controller does not cancel poles or zeros in the imaginary axis which would result in closed-loop instability +4. \\(D\_{11} = 0\\) and \\(D\_{22} = 0\\) is a conventional requirement for \\(\htwo\\) control. + This is not required for \\(\hinf\\) control but this significantly simplify algorithm formulas +5. \\(D\_{12}^T C\_1 = 0\\) and \\(B\_1 D\_{12}^T = 0\\) is common in \\(\htwo\\) control. + \\(D\_{12}^T C\_1 = 0\\) means that there is no cross terms in the cost function and \\(B\_1 D\_{12}^T = 0\\) that the process noise and measurement noise are uncorrelated +6. \\((A, B\_1)\\) is stabilizable and \\((A, C\_1)\\) is detectable + +If the Matlab Robust Control Toolbox complains, then it probably means that the control problem is not well formulated and that some assumptions are not met.
+ +\\(\hinf\\) algorithms, in general, find a **sub-optimal controller**. That is, for a specified \\(\gamma\\) a stabilizing controller is found for which \\(\hnorm{F\_l(P,K)}<\gamma\\). +This contrasts with \\(\htwo\\) theory, in which the optimal controller is **unique** and can be found from the solution of two Riccati equations. + + +#### \\(\htwo\\) Optimal Control {#htwo--optimal-control} + +
+
+ +The standard \\(\htwo\\) optimal control problem is to find a stabilizing controller \\(K\\) which minimizes +\\[\normtwo{F(s)} = \sqrt{\frac{1}{2\pi}\int\_{-\infty}^{\infty} tr[F(j\w)F(j\w)^H]d\w }\\] +With \\(F = F\_l(P, K)\\). + +
+ +For a particular problem, the generalized plant \\(P\\) will include the plant model, the interconnection structure, and the designer specified weighting functions.
+ +The \\(\htwo\\) norm can be given different **deterministic interpretations**. +It also has the following **stochastic interpretation**. + +Suppose in the general control configuration that the exogenous input \\(w\\) is white noise of unit density. That is +\\[ E\\{w(t)w(\tau)^T\\} = I \delta(t-\tau) \\] + +Expected power in the error signal \\(z\\) is then given by + +\begin{align\*} + P\_z &= E\bigg\\{ \lim\_{T\rightarrow\infty} \frac{1}{2T} \int\_{-T}^{T} z(t)^T z(t) dt \bigg\\} \\\\\\ + &= \text{tr}\ E\\{z(t)z(t)^H\\}\\\\\\ + &= \frac{1}{2\pi} \int\_{-\infty}^{\infty}\text{tr}\left[F(j\omega)F(j\omega)^H\right]d\omega\\\\\\ + &= \normtwo{F}^2 = \normtwo{F\_l(P,K)}^2 +\end{align\*} + +
+
+ +Thus, by minimizing the \\(\htwo\\) norm, the error power of the generalized system, due to a unit intensity white noise input, is minimized. +We are **minimizing the Root Mean Square value** of \\(z\\). + +
+ + +#### LQG: a Special \\(\htwo\\) Optimal Controller {#lqg-a-special--htwo--optimal-controller} + +An important special case of \\(\htwo\\) optimal control is the LQG problem. +For the stochastic system + +\begin{align\*} + \dot{x} &= A x + B u + w\_d \\\\\\ + y &= Cx + w\_n +\end{align\*} + +where +\\[ E \left\\{ \begin{bmatrix} w\_d(t)\\w\_n(t) \end{bmatrix} \begin{bmatrix} w\_d(\tau)^T & w\_n(\tau)^T \end{bmatrix} \right\\} = \begin{bmatrix} W & 0 \\ 0 & V \end{bmatrix} \delta (t - \tau) \\] + +The LQG problem is to find \\(u = K(s) y\\) such that +\\[ J = E \left\\{ \lim\_{T\to \infty} \frac{1}{T} \int\_0^T [x^T Q x + u^T R u] dt \right\\} \\] +is minimized with \\(Q = Q^T \ge 0\\) and \\(R = R^T > 0\\). + +This problem can be cast as an \\(\htwo\\) optimization in the general framework in the following manner. + +Define the error signal \\(z\\) as +\\[ z = \begin{bmatrix} + Q^{\frac{1}{2}} & 0 \\\\\\ + 0 & R^{\frac{1}{2}} +\end{bmatrix} \begin{bmatrix} + x \\ u +\end{bmatrix} \\] + +Represent the stochastic inputs as +\\[ \begin{bmatrix} + w\_d \\ w\_n +\end{bmatrix} = \begin{bmatrix} + W^{\frac{1}{2}} & 0 \\\\\\ + 0 & V^{\frac{1}{2}} +\end{bmatrix} w \\] +where \\(w\\) is a white noise process of unit density. + +Then the LQG cost function is +\\[ J = E \left\\{ \lim\_{T\to \infty} \frac{1}{T} \int\_0^T z(t)^T z(t) dt \right\\} = \normtwo{F\_l(P,K)}^2 \\] + + +#### \\(\hinf\\) Optimal Control {#hinf--optimal-control} + +With reference to the general control configuration on Fig. [fig:general_control](#fig:general_control), the standard \\(\hinf\\) optimal control problem is to find all stabilizing controllers \\(K\\) which minimize +\\[ \hnorm{F\_l(P, K)} = \max\_{\omega} \maxsv\big(F\_l(P, K)(j\omega)\big) \\] + +The \\(\hinf\\) norm has **several interpretations** in terms of performance. +One is that it minimizes the peak of the maximum singular value of \\(F\_l(P(j\omega), K(j\omega))\\). 
+ +It also has a time domain interpretation as the worst-case 2-norm: + +\begin{equation} + \hnorm{F\_l(P,K)} = \max\_{w(t)\ne0} \frac{\normtwo{z(t)}}{\normtwo{w(t)}} +\end{equation} + +where \\(\normtwo{z(t)} = \sqrt{\int\_0^\infty \sum\_i \abs{z\_i}^2 dt}\\) is the 2-norm of the vector signal.
+ +In practice, it is usually not necessary to obtain an optimal controller for the \\(\hinf\\) problem, and it is simpler to design a **sub-optimal** one. + +Let \\(\gamma\_\text{min}\\) be the minimum value of \\(\hnorm{F\_l(P,K)}\\) over all stabilizing controllers \\(K\\). +Then the \\(\hinf\\) **sub-optimal control problem** is: given a \\(\gamma > \gamma\_\text{min}\\), find all stabilizing controllers \\(K\\) such that + +\begin{equation} + \hnorm{F\_l(P, K)} < \gamma +\end{equation} + +By reducing \\(\gamma\\) in an iterative way, an optimal solution is approached.
+ +**General \\(\hinf\\) algorithm**. +For the general control configuration and with assumptions described above, there exists a stabilizing controller \\(K(s)\\) such that \\(\hnorm{F\_l(P,K)}<\gamma\\) if and only if + +1. \\(X\_\infty \ge 0\\) is a solution to the algebraic Riccati equation \\(A^T X\_\infty + X\_\infty A + C\_1^T C\_1 + X\_\infty (\gamma^{-2} B\_1 B\_1^T - B\_2 B\_2^T)X\_\infty = 0\\) such that \\(\text{Re } \lambda\_i \left[ A + (\gamma^{-2}B\_1B\_1^T - B\_2B\_2^T)X\_\infty \right] < 0, \ \forall i\\) +2. \\(Y\_\infty \ge 0\\) is a solution to the algebraic Riccati equation \\(A Y\_\infty + Y\_\infty A^T + B\_1 B\_1^T + Y\_\infty (\gamma^{-2} C\_1^T C\_1 - C\_2^T C\_2)Y\_\infty = 0\\) such that \\(\text{Re } \lambda\_i \left[ A + Y\_\infty(\gamma^{-2}C\_1^TC\_1 - C\_2^TC\_2) \right] < 0, \ \forall i\\) +3. \\(\rho(X\_\infty Y\_\infty) < \gamma^2\\) + +All such controllers are then given by \\(K = F\_l(K\_c, Q)\\) where + +\begin{align\*} + K\_c(s) &= \left[ \begin{array}{c|cc} + A\_\infty & -Z\_\infty L\_\infty & Z\_\infty B\_2 \\ \hline + F\_\infty & 0 & I \\\\\\ + -C\_2 & I & 0 + \end{array} \right], \ L\_\infty = -Y\_\infty C\_2^T \\\\\\ + F\_\infty &= -B\_2^T X\_\infty, \ Z\_\infty = (I - \gamma^{-2} Y\_\infty X\_\infty)^{-1} \\\\\\ + A\_\infty &= A + \gamma^{-2} B\_1 B\_1^T X\_\infty + B\_2F\_\infty + Z\_\infty L\_\infty C\_2 +\end{align\*} + +and \\(Q(s)\\) is any stable proper transfer function such that \\(\hnorm{Q} < \gamma\\).
+ +For \\(Q(s) = 0\\), we get + +\begin{equation} + K(s) = K\_{c11}(s) = -Z\_\infty L\_\infty (s I - A\_\infty)^{-1} F\_\infty +\end{equation} + +This is called the **central controller** and has the same number of states as the generalized plant \\(P(s)\\). + +The central controller can be separated into a state estimator (observer) of the form +\\[ \dot{\hat{x}} = A\hat{x} + B\_1 \gamma^{-2} B\_1^T X\_\infty \hat{x} + B\_2 u + Z\_\infty L\_\infty (C\_2 \hat{x} - y) \\] +and a state feedback \\(u = F\_\infty \hat{x}\\). + +
+
+ +If we desire a controller that achieves \\(\gamma\_\text{min}\\), to within specified tolerance, then we can perform a **bisection** on \\(\gamma\\) until its value is sufficiently accurate. +The above conditions provide a test for each value of \\(\gamma\\) to determine if \\(\gamma<\gamma\_\text{min}\\) or \\(\gamma>\gamma\_\text{min}\\). + +
+ +There are mainly two methodologies for \\(\hinf\\) controller design: the **transfer function shaping approach** and the **signal-based approach**.
+ +In the **shaping approach**, \\(\hinf\\) optimization is used to shape the singular values of specified transfer functions over frequency. +The maximum singular values are relatively easy to shape by forcing them to lie below user defined bounds, thereby ensuring desirable bandwidth and roll-off rates.
+ +In the **signal-based approach**, we seek to minimize the energy in certain error signal given a set of exogenous input signals.
+ +A difficulty that sometimes arises with \\(\hinf\\) control is the **selection of weights** such that the \\(\hinf\\) optimal controller provides a good trade-off between conflicting objectives in various frequency ranges. +Thus, for practical designs it is sometimes recommended to perform only a few iterations of the \\(\hinf\\) algorithm. +The justification for this is that the initial design, after one iteration, is similar to an \\(\htwo\\) design which does trade-off over various frequency ranges. +Therefore stopping the iterations before the optimal value is achieved gives the design an \\(\htwo\\) flavor which may be desirable. + + +#### Mixed-Sensitivity \\(\hinf\\) Control {#mixed-sensitivity--hinf--control} + +Mixed-sensitivity is the name given to transfer function shaping problems in which the sensitivity function \\(S = (I + GK)^{-1}\\) is shaped along with one or more other closed-loop transfer functions such as \\(KS\\) or \\(T = I - S\\).
+ +Suppose that we have a regulation problem in which we want to reject a disturbance \\(d\\) entering at the plant output and it is assumed that the measurement noise is relatively insignificant. +It makes sense to shape the closed-loop transfer functions \\(S\\) and \\(KS\\). +Recall that \\(S\\) is the transfer function between \\(d\\) and the output, and \\(KS\\) the transfer function from \\(d\\) to the control signal. +It is important to include \\(KS\\) as a mechanism for **limiting the size and bandwidth of the controller**, and hence the energy used. +The size of \\(KS\\) is also important for robust stability with respect to uncertainty modeled as additive plant perturbations.
+ +The disturbance \\(d\\) is typically a low frequency signal, and therefore it will be successfully rejected if the maximum singular value of \\(S\\) is made small over the same low frequency range. +To do this, we could select a scalar low pass filter \\(w\_1(s)\\) with a bandwidth equal to that of the disturbance, and then find a stabilizing controller that minimizes \\(\hnorm{w\_1 S}\\). +This cost function alone is not very practical, it focuses on just one closed-loop transfer function and the controller may have infinite gain. +It is far more useful in practice to minimize + +\begin{equation} + \hnorm{\begin{matrix} w\_1 S \\ w\_2 KS \end{matrix}} +\end{equation} + +where \\(w\_2(s)\\) is a scalar high pass filter with a crossover frequency approximately equal to that of the desired closed-loop bandwidth.
+ +In general, the scalar weighting functions \\(w\_1(s)\\) and \\(w\_2(s)\\) can be replaced by matrices \\(W\_1(s)\\) and \\(W\_2(s)\\). +This can be useful for **systems with channels of quite different bandwidths**. +In that case, **diagonal weights are recommended** as anything more complicated is usually not worth the effort.
+ +To see how this mixed sensitivity problem can be formulated in the general setting, we can imagine the disturbance \\(d\\) as a single exogenous input and define and error signal \\(z = [z\_1^T\ z\_2^T]^T\\), where \\(z\_1 = W\_1 y\\) and \\(z\_2 = -W\_2 u\\) as illustrated in Fig. [fig:mixed_sensitivity_dist_rejection](#fig:mixed_sensitivity_dist_rejection). +We can then see that \\(z\_1 = W\_1 S w\\) and \\(z\_2 = W\_2 KS w\\) as required. +The elements of the generalized plant are + +\begin{equation\*} + \begin{array}{ll} + P\_{11} = \begin{bmatrix}W\_1\\0\end{bmatrix} & P\_{12} = \begin{bmatrix}W\_1G\\-W\_2\end{bmatrix} \\\\\\ + P\_{21} = -I & P\_{22} = -G + \end{array} +\end{equation\*} + + + +{{< figure src="/ox-hugo/skogestad07_mixed_sensitivity_dist_rejection.png" caption="Figure 44: \\(S/KS\\) mixed-sensitivity optimization in standard form (regulation)" >}} + +Another interpretation can be put on the \\(S/KS\\) mixed-sensitivity optimization as shown in the standard control configuration of Fig. [fig:mixed_sensitivity_ref_tracking](#fig:mixed_sensitivity_ref_tracking). +Here we consider a tracking problem. +The exogenous input is a reference command \\(r\\), and the error signals are \\(z\_1 = -W\_1 e = W\_1 (r-y)\\) and \\(z\_2 = W\_2 u\\). +As the regulation problem of Fig. [fig:mixed_sensitivity_dist_rejection](#fig:mixed_sensitivity_dist_rejection), we have that \\(z\_1 = W\_1 S w\\) and \\(z\_2 = W\_2 KS w\\). + + + +{{< figure src="/ox-hugo/skogestad07_mixed_sensitivity_ref_tracking.png" caption="Figure 45: \\(S/KS\\) mixed-sensitivity optimization in standard form (tracking)" >}} + +Another useful mixed sensitivity optimization problem, is to find a stabilizing controller which minimizes + +\begin{equation} + \hnorm{\begin{matrix} W\_1 S \\ W\_2 T \end{matrix}} +\end{equation} + +The ability to shape \\(T\\) is desirable for tracking problems and noise attenuation. 
+It is also important for robust stability with respect to multiplicative perturbations at the plant output. + +The \\(S/T\\) mixed-sensitivity minimization problem can be put into the standard control configuration as shown in Fig. [fig:mixed_sensitivity_s_t](#fig:mixed_sensitivity_s_t). + +The elements of the generalized plant are + +\begin{equation\*} + \begin{array}{ll} + P\_{11} = \begin{bmatrix}W\_1\\0\end{bmatrix} & P\_{12} = \begin{bmatrix}-W\_1G\\W\_2G\end{bmatrix} \\\\\\ + P\_{21} = -I & P\_{22} = -G \\\\\\ + \end{array} +\end{equation\*} + + + +{{< figure src="/ox-hugo/skogestad07_mixed_sensitivity_s_t.png" caption="Figure 46: \\(S/T\\) mixed-sensitivity optimization in standard form" >}} + +The shaping of closed-loop transfer functions as described above with the stacked cost functions becomes difficult with more than two functions whereas with two, the process is relatively easy. +The bandwidth requirements on each are usually complementary and simple, stable low-pass and high-pass filters are sufficient to carry out the required shaping and trade-offs.
+ +The weights \\(W\_i\\) in mixed-sensitivity \\(\hinf\\) optimal control must all be stable. +Therefore, if we wish, for example, to emphasize the minimization of \\(S\\) at low frequency by weighting with a term including integral action, we would have to approximate \\(\frac{1}{s}\\) by \\(\frac{1}{s + \epsilon}\\) where \\(\epsilon \ll 1\\). +Similarly, one might be interested in weighting \\(KS\\) with a non-proper weight to ensure that \\(K\\) is small outside of the system bandwidth. +The trick is to replace a non proper term such as \\((1 + \tau\_1 s)\\) by \\(\frac{1 + \tau\_1 s}{1 + \tau\_2 s}\\) where \\(\tau\_2 \ll \tau\_1\\). + + +#### Signal-Based \\(\hinf\\) Control {#signal-based--hinf--control} + +The signal-based approach to controller design is very general and is appropriate for multivariable problems in which several objectives must be taken into account simultaneously. +In this approach, we define the plant, possibly the model uncertainty, the **class of external signals affecting the system** and the **norm of the error signals we want to keep small**. + +
+
+ +The focus of attention has moved to the size of signals and away from the size and bandwidth of selected closed-loop transfer functions. + +
+ +Weights are used to describe the expected or known frequency content of exogenous signals and the desired frequency content of error signals. +Weights are also used if a perturbation is used to model uncertainty, as in Fig. [fig:input_uncertainty_hinf](#fig:input_uncertainty_hinf), where \\(G\\) represents the nominal model, \\(W\\) is a weighting function that captures the relative model fidelity over frequency, and \\(\Delta\\) represents unmodelled dynamics usually normalized such that \\(\hnorm{\Delta} < 1\\). + + + +{{< figure src="/ox-hugo/skogestad07_input_uncertainty_hinf.png" caption="Figure 47: Multiplicative dynamic uncertainty model" >}} + +LQG control is a simple example of the signal based approach, in which the exogenous signals are assumed to be stochastic and the error signals are measured in terms of the 2-norm. +As we have seen, the weights \\(Q\\) and \\(R\\) are constant, but LQG can be generalized to include frequency dependent weights on the signals leading to what is called Wiener-Hopf design or \\(\htwo\\) control.
+ +When we consider a system's response to persistent sinusoidal signals of varying frequency, or when we consider the induced 2-norm between the exogenous input signals and the error signals, we are required to minimize the \\(\hinf\\) norm. +In the absence of model uncertainty, there does not appear to be an overwhelming case for using the \\(\hinf\\) norm rather than the more traditional \\(\htwo\\) norm. +However, when uncertainty is addressed, as it always should be, \\(\hinf\\) is clearly the more **natural approach** using component uncertainty models as in Fig. [fig:input_uncertainty_hinf](#fig:input_uncertainty_hinf).
+ +A typical problem using the signal-based approach to \\(\hinf\\) control is illustrated in the interconnection diagram of Fig. [fig:hinf_signal_based](#fig:hinf_signal_based). +\\(G\\) and \\(G\_d\\) are nominal models of the plant and disturbance dynamics, and \\(K\\) is the controller to be designed. +The weights \\(W\_d\\), \\(W\_r\\), and \\(W\_n\\) may be constant or dynamic and describe the relative importance and/or the frequency content of the disturbance, set points and noise signals. +The weight \\(W\_\text{ref}\\) is a desired closed-loop transfer function between the weighted set point \\(r\_s\\) and the actual output \\(y\\). +The weights \\(W\_e\\) and \\(W\_u\\) reflect the desired frequency content of the error \\((y-y\_\text{ref})\\) and the control signals \\(u\\), respectively. +The problem can be cast as a standard \\(\hinf\\) optimization in the general control configuration by defining + +\begin{equation\*} + w = \begin{bmatrix}d\\r\\n\end{bmatrix},\ z = \begin{bmatrix}z\_1\\z\_2\end{bmatrix}, \ v = \begin{bmatrix}r\_s\\y\_m\end{bmatrix},\ u = u +\end{equation\*} + + + +{{< figure src="/ox-hugo/skogestad07_hinf_signal_based.png" caption="Figure 48: A signal-based \\(\hinf\\) control problem" >}} + +Suppose we now introduce a multiplicative dynamic uncertainty model at the input to the plant as shown in Fig. [fig:hinf_signal_based_uncertainty](#fig:hinf_signal_based_uncertainty). +The problem we now want to solve is: find a stabilizing controller \\(K\\) such that the \\(\hinf\\) norm of the transfer function between \\(w\\) and \\(z\\) is less that 1 for all \\(\Delta\\) where \\(\hnorm{\Delta} < 1\\). +We have assumed in this statement that the **signal weights have normalized the 2-norm of the exogenous input signals to unity**. +This problem is a non-standard \\(\hinf\\) optimization. 
+It is a robust performance problem for which the \\(\mu\text{-synthesis}\\) procedure can be applied where we require the structured singular value: +\\[ \mu(M(j\omega)) < 1, \quad \forall\omega \\] + + + +{{< figure src="/ox-hugo/skogestad07_hinf_signal_based_uncertainty.png" caption="Figure 49: A signal-based \\(\hinf\\) control problem with input multiplicative uncertainty" >}} + +However, whilst the structured singular value is a useful analysis tool for assessing designs, \\(\mu\text{-synthesis}\\) is sometimes difficult to use and often too complex for the practical problems. + + +### \\(\hinf\\) Loop-Shaping Design {#hinf--loop-shaping-design} + +The loop-shaping design procedure described in this section is based on \\(\hinf\\) robust stabilization combined with classical loop shaping. +It is essentially a **two stage design process**: + +- First the open-loop plant is augmented by pre and post compensators to give a desired shape to the singular values of the open-loop frequency response +- Then the resulting shaped plant is robustly stabilized with respect to coprime factor uncertainty using \\(\hinf\\) optimization + +An important advantage is that no problem-dependent uncertainty modelling, or weight selection, is required in this second step. + + +#### Robust Stabilization {#robust-stabilization} + +For multivariable systems, **classical gain and phase margins are unreliable indicators of robust stability** when defined for each channel (or loop), taken one at a time, because simultaneous perturbations in more than one loop are not then catered for.
+ +It is now common practice to model uncertainty by stable **norm-bounded** dynamic (complex) **matrix perturbations**. +With a single perturbation, the associated robustness tests is in terms of the maximum singular values of various closed-loop transfer functions. +Use of a single stable perturbation restricts the plant and perturbed plant models to either have the same number of unstable poles or the same number of RHP zeros.
+ +To overcome this, **two stable perturbations** can be used, one on each of the factors in a **coprime factorization** of the plant. +Although this uncertainty description seems unrealistic and less intuitive than the others, it is in fact quite general, and for our purposes it leads to a very useful \\(\hinf\\) robust stabilization problem.
+ +Let's consider the stabilization of a plant \\(G\\) which has a normalized left coprime factorization + +\begin{equation} + G = M^{-1} N +\end{equation} + +where we have dropped the subscripts from \\(M\\) and \\(N\\) for simplicity. + +A perturbed plant model \\(G\_p\\) can then be written as + +\begin{equation} + G\_p = (M + \Delta\_M)^{-1} (N + \Delta\_N) +\end{equation} + +where \\(\Delta\_M\\), \\(\Delta\_N\\) are stable unknown transfer functions which represent the uncertainty in the nominal plant \\(G\\).
+ +The objective of robust stabilization is to stabilize not only the nominal model \\(G\\), but a family of perturbed plants defined by + +\begin{equation} + G\_p = \\{ (M + \Delta\_M)^{-1} (N + \Delta\_N) \ :\ \hnorm{\Delta\_N\ \Delta\_M} < \epsilon \\} +\end{equation} + +where \\(\epsilon > 0\\) is then the **stability margin**.
+ +For the perturbed feedback system of Fig. [fig:coprime_uncertainty_bis](#fig:coprime_uncertainty_bis), the stability property is robust if and only if the nominal feedback system is stable and +\\[ \gamma \triangleq \hnorm{\begin{bmatrix}K \\ I\end{bmatrix} (I - GK)^{-1} M^{-1}} \le \frac{1}{\epsilon} \\] + +Notice that \\(\gamma\\) is the \\(\hinf\\) norm from \\(\phi\\) to \\(\begin{bmatrix}u\\y\end{bmatrix}\\) and \\((I-GK)^{-1}\\) is the sensitivity function for this positive feedback arrangement. + + + +{{< figure src="/ox-hugo/skogestad07_coprime_uncertainty_bis.png" caption="Figure 50: \\(\hinf\\) robust stabilization problem" >}} + +The lowest achievable value of \\(\gamma\\) and the corresponding maximum stability margin \\(\epsilon\\) are given as + +\begin{equation} + \gamma\_\text{min} = \epsilon\_{\text{max}}^{-1} = \left\\{ 1 - \\|N \ M\\|\_H^2 \right\\}^{-\frac{1}{2}} = (1 + \rho(XZ))^{\frac{1}{2}} +\end{equation} + +where \\(\\|\ \cdot\ \\|\_H\\) denotes Hankel norm, \\(\rho\\) denotes the spectral radius (maximum eigenvalue), and for a minimal state space realization of G, Z is the unique positive definite solution of the algebraic Riccati equation + +\begin{align\*} + (A - BS^{-1} D^TC)Z &+ Z(A - BS^{-1}D^TC)^T \\\\\\ + &- ZC^TR^{-1}CZ + BS^{-1}B^T = 0 +\end{align\*} + +where +\\[ R = I + D D^T, \quad S = I + D^T D \\] +\\(X\\) is the unique positive definite solution of the following algebraic Riccati equation + +\begin{align\*} + (A - BS^{-1} D^T C)X &+ X(A - BS^{-1}D^TC)^T \\\\\\ + &- XBS^{-1} B^T X + C^TR^{-1}C = 0 +\end{align\*} + +A controller which guarantees that +\\[ \hnorm{ \begin{bmatrix}K\\I\end{bmatrix} (I-GK)^{-1} M^{-1} } \le \gamma \\] +for a specified \\(\gamma > \gamma\_\text{min}\\), is given by + +\begin{subequations} + \begin{align} + K &\triangleq \left[ \begin{array}{c|c} + {\scriptstyle A + BF + \gamma^2L^{-T} Z C^T(C + DF)} & {\scriptstyle \gamma^2L^{-T} Z C^T} \\ \hline + {\scriptstyle B^T X} & {\scriptstyle -D^T} + 
\end{array} \right] \label{eq:control\_coprime\_factor} \\\\\\ + F &= -S^{-1}(D^T C + B^T X)\\\\\\ + L &= (1-\gamma^2) I + XZ + \end{align} +\end{subequations} + +The Matlab function `coprimeunc` can be used to generate the controller in [eq:control_coprime_factor](#eq:control_coprime_factor). +It is important to emphasize that since we can compute \\(\gamma\_\text{min}\\) from [eq:gamma_min_coprime](#eq:gamma_min_coprime) we get an explicit solution by solving just two Riccati equations and avoid the \\(\gamma\text{-iteration}\\) needed to solve the general \\(\mathcal{H}\_\infty\\) problem. + + +#### A Systematic \\(\hinf\\) Loop-Shaping Design Procedure {#a-systematic--hinf--loop-shaping-design-procedure} + + +Robust stabilization alone is not much used in practice because the designer is not able to specify any performance requirements. + +To do so, **pre and post compensators** are used to **shape the open-loop singular values** prior to robust stabilization of the "shaped" plant. + +If \\(W\_1\\) and \\(W\_2\\) are the pre and post compensators respectively, then the shaped plant \\(G\_s\\) is given by + +\begin{equation} + G\_s = W\_2 G W\_1 +\end{equation} + +as shown in Fig. [fig:shaped_plant](#fig:shaped_plant). + + + +{{< figure src="/ox-hugo/skogestad07_shaped_plant.png" caption="Figure 51: The shaped plant and controller" >}} + +The controller \\(K\_s\\) is synthesized by solving the robust stabilization problem for the shaped plant \\(G\_s\\) with a normalized left coprime factorization \\(G\_s = M\_s^{-1}N\_s\\). +The feedback controller for the plant \\(G\\) is then \\(K = W\_1 K\_s W\_2\\).
+ +Systematic procedure for \\(\hinf\\) loop-shaping design: + +1. **Scale the plant outputs and inputs**. + This is very important for most design procedures. + In general, scaling improves the conditioning of the design problem, it enables meaningful analysis to be made of the robustness properties of the feedback system in the frequency domain, and for loop shaping it can simplify the selection of weights: + - The outputs are scaled such that equal magnitudes of cross-coupling into each of the outputs is equally undesirable + - Each input is scaled by a given percentage (say \\(\SI{10}{\%}\\)) of its expected range of operation. + That is, the inputs are scaled to reflect the relative actuator capabilities. +2. **Order the inputs and outputs** so that the plant is as diagonal as possible. + The relative gain array can be useful here. + The purpose of this pseudo-diagonalization is to ease the design of the pre and post compensators which, for simplicity, will be chosen to be diagonal. + Next, we discuss the selection of weights to obtain the shaped plant \\(G\_s = W\_2 G W\_1\\) where \\(W\_1 = W\_p W\_a W\_g\\) +3. **Select the elements of diagonal pre and post compensators** \\(W\_p\\) and \\(W\_2\\) so that the singular values of \\(W\_2 G W\_p\\) are desirable. + This would normally mean high gain at low frequencies, a slope of about \\(-1\\) at the desired bandwidth(s), with higher rates at high frequencies. + The weights should be chosen so that no unstable hidden modes are created in \\(G\_s\\) + - \\(W\_2\\) is usually chosen as a constant, reflecting the relative importance of the outputs to be controlled and the other measurements being fed back to the controller + - \\(W\_p\\) contains the dynamic shaping. Integral action, for low frequency performance; phase-advance for reducing the roll-off rates at crossover; and phase-lag to increase the roll-off rates at high frequencies should all be places in \\(W\_p\\) is desired +4. 
_Optional_: Align the singular values at a desired bandwidth using a further constant weight \\(W\_a\\) cascaded with \\(W\_p\\) +5. _Optional_: Introduce an additional gain matrix \\(W\_g\\) cascaded with \\(W\_a\\) to provide control over actuator range. \\(W\_g\\) is diagonal and is adjusted so that actuator rate limits are not exceeded for reference demands and typical disturbances on the scaled plant outputs +6. **Robustly stabilize the shaped plant** \\(G\_s = W\_2 G W\_1\\) where \\(W\_1 = W\_p W\_a W\_g\\) + - First, calculate the maximum stability margin \\(\epsilon\_{\text{max}} = 1/\gamma\_\text{min}\\) + - If the margin is too small, \\(\epsilon\_{\text{max}} < 0.25\\), then go back to step 4 and modify the weights. Otherwise, select \\(\gamma > \gamma\_\text{min}\\), by about \\(\SI{10}{\%}\\), and synthesize a sub-optimal controller. There is usually no advantage to be gained by using the optimal controller + - When \\(\epsilon\_{\text{max}} > 0.25\\) (respectively \\(\gamma\_\text{min} < 4\\)) the design is usually successful. In this case, at least \\(\SI{25}{\%}\\) coprime factor uncertainty is allowed, and we also find that the shape of the open-loop singular values will not have changed much after robust stabilization + - A small value of \\(\epsilon\_{\text{max}}\\) indicates that the chosen singular value loop-shapes are incompatible with robust stability requirements +7. **Analyze the design** and if not all the specification are met, make further modifications to the weights +8. **Implement the controller**. + The configuration shown in Fig. [fig:shapping_practical_implementation](#fig:shapping_practical_implementation) has been found useful when compared with the conventional setup in Fig. [fig:classical_feedback_small](#fig:classical_feedback_small). + This is because the references do not directly excite the dynamics of \\(K\_s\\), which can result in large amounts of overshoot. 
+ + The constant prefilter ensures a steady-state gain of \\(1\\) between \\(r\\) and \\(y\\), assuming integral action in \\(W\_1\\) or \\(G\\) + + + +{{< figure src="/ox-hugo/skogestad07_shapping_practical_implementation.png" caption="Figure 52: A practical implementation of the loop-shaping controller" >}} + +We will conclude this section with a summary of the **advantages** offered by the above \\(\hinf\\) loop-shaping design procedure: + +- It is relatively easy to use, being based on classical loop-shaping ideas +- There exists a closed formula for the \\(\hinf\\) optimal cost \\(\gamma\_\text{min}\\), which in turn corresponds to a maximum stability margin \\(\epsilon\_{\text{max}} = 1/\gamma\_\text{min}\\) +- No \\(\gamma\text{-iteration}\\) is required in the solution +- Except for special systems, ones with all-pass factors, there are no pole-zero cancellations between the plant and controller. + Pole-zero cancellations are common in many \\(\hinf\\) control problems and are a problem when the plant has lightly damped modes + + +#### Two Degrees-of-freedom Controllers {#two-degrees-of-freedom-controllers} + +Many control design problems possess two degrees-of-freedom: + +- on one hand, **measurement of feedback signals** +- and on the other hand, **commands and reference** + +Sometimes, one degree-of-freedom is left out of the design, and the controller is driven by an error signal i.e. the difference between a command and the output. +But in cases where stringent time-domain specifications are set on the output response, a one degree-of-freedom structure may not be sufficient.
+ +A general two degrees-of-freedom feedback control scheme is depicted in Fig. [fig:classical_feedback_2dof_simple](#fig:classical_feedback_2dof_simple). +The commands and feedbacks enter the controller separately and are independently processed. + + + +{{< figure src="/ox-hugo/skogestad07_classical_feedback_2dof_simple.png" caption="Figure 53: General two degrees-of-freedom feedback control scheme" >}} + +The presented \\(\mathcal{H}\_\infty\\) loop-shaping design procedure in section [sec:hinf_loop_shaping_procedure](#sec:hinf_loop_shaping_procedure) is a one-degree-of-freedom design, although a **constant** pre-filter can be easily implemented for steady-state accuracy. +However, this may not be sufficient and a dynamic two degrees-of-freedom design is required.
+ +The design problem is illustrated in Fig. [fig:coprime_uncertainty_hinf](#fig:coprime_uncertainty_hinf). +The feedback part of the controller \\(K\_2\\) is designed to meet robust stability and disturbance rejection requirements. +A prefilter is introduced to force the response of the closed-loop system to follow that of a specified model \\(T\_{\text{ref}}\\), often called the **reference model**. + + + +{{< figure src="/ox-hugo/skogestad07_coprime_uncertainty_hinf.png" caption="Figure 54: Two degrees-of-freedom \\(\mathcal{H}\_\infty\\) loop-shaping design problem" >}} + +The design problem is to find the stabilizing controller \\(K = [K\_1,\ K\_2]\\) for the shaped plant \\(G\_s = G W\_1\\), with a normalized coprime factorization \\(G\_s = M\_s^{-1} N\_s\\), which minimizes the \\(\mathcal{H}\_\infty\\) norm of the transfer function between the signals \\([r^T\ \phi^T]^T\\) and \\([u\_s^T\ y^T\ e^T]^T\\) as defined in Fig. [fig:coprime_uncertainty_hinf](#fig:coprime_uncertainty_hinf). +This problem is easily cast into the general configuration. + +The control signal to the shaped plant \\(u\_s\\) is given by: +\\[ u\_s = \begin{bmatrix} K\_1 & K\_2 \end{bmatrix} \begin{bmatrix} \beta \\ y \end{bmatrix} \\] +where \\(K\_1\\) is the prefilter, \\(K\_2\\) is the feedback controller, \\(\beta\\) is the scaled reference and \\(y\\) is the measured output. +The purpose of the prefilter is to ensure that: +\\[ \left\\| (I - G\_s K\_2)^{-1} G\_s K\_1 - T\_{\text{ref}} \right\\|\_\infty < \gamma \rho^2 \\] +\\(T\_{\text{ref}}\\) is the desired closed-loop transfer function and \\(\rho\\) is a scalar parameter that the designer can increase to place more emphasis on model matching in the optimization at the expense of robustness.
+ +The main steps required to synthesize a two degrees-of-freedom \\(\mathcal{H}\_\infty\\) loop-shaping controller are: + +1. Design a one degree-of-freedom \\(\mathcal{H}\_\infty\\) loop-shaping controller (section [sec:hinf_loop_shaping_procedure](#sec:hinf_loop_shaping_procedure)) but without a post-compensator \\(W\_2\\) +2. Select a desired closed-loop transfer function \\(T\_{\text{ref}}\\) between the commands and controller outputs +3. Set the scalar parameter \\(\rho\\) to a small value greater than \\(1\\); something in the range \\(1\\) to \\(3\\) will usually suffice +4. For the shaped \\(G\_s = G W\_1\\), the desired response \\(T\_{\text{ref}}\\), and the scalar parameter \\(\rho\\), solve the standard \\(\mathcal{H}\_\infty\\) optimization problem to a specified tolerance to get \\(K = [K\_1,\ K\_2]\\) +5. Replace the prefilter \\(K\_1\\) by \\(K\_1 W\_i\\) to give exact model-matching at steady-state. +6. Analyze and, if required, redesign making adjustments to \\(\rho\\) and possibly \\(W\_1\\) and \\(T\_{\text{ref}}\\) + +The final two degrees-of-freedom \\(\mathcal{H}\_\infty\\) loop-shaping controller is illustrated in Fig. [fig:hinf_synthesis_2dof](#fig:hinf_synthesis_2dof). + + + +{{< figure src="/ox-hugo/skogestad07_hinf_synthesis_2dof.png" caption="Figure 55: Two degrees-of-freedom \\(\mathcal{H}\_\infty\\) loop-shaping controller" >}} + + +#### Observer-Based Structure for \\(\hinf\\) Loop-Shaping Controllers {#observer-based-structure-for--hinf--loop-shaping-controllers} + +\\(\mathcal{H}\_\infty\\) designs exhibit an observer/state feedback structure in the controller. 
+The clear structure of the \\(\mathcal{H}\_\infty\\) loop-shaping controllers has several advantages: + +- It is helpful in describing a controller's function +- It lends itself to implementation in a gain-schedule scheme +- It offers computational savings in digital implementations + +Let's assume that the shaped plant is strictly proper, with a stabilizable and detectable state space realization +\\[ G\_s \triangleq \left[ \begin{array}{c|c} + A\_s & B\_s \\ \hline + C\_s & 0 +\end{array} \right] \\] + +The single degree-of-freedom \\(\mathcal{H}\_\infty\\) loop-shaping controller can be realized as an observer for the shaped plant plus a state feedback control law: + +\begin{align\*} + \dot{\hat{x}}\_s &= A\_s \hat{x}\_s + H\_s(C\_s \hat{x}\_s - y\_s) + B\_s u\_s \\\\\\ + u\_s &= K\_s \hat{x}\_s +\end{align\*} + +where \\(\hat{x}\_s\\) is the observer state, \\(u\_s\\) and \\(y\_s\\) are respectively the input and output of the shaped plant, and + +\begin{align\*} + H\_s &= -Z\_s C\_s^T \\\\\\ + K\_s &= -B\_s^T [I - \gamma^{-2}I - \gamma^{-2} X\_s Z\_s]^{-1} X\_s +\end{align\*} + +where \\(Z\_s\\) and \\(X\_s\\) are the appropriate solutions to the generalized algebraic Riccati equations for \\(G\_s\\). + +The same can be done for two degrees-of-freedom controllers. + + +#### Implementation Issues {#implementation-issues} + + +##### Discrete-time controllers {#discrete-time-controllers} + +For implementation purposes, discrete-time controllers are usually required. +These can be obtained from a continuous-time design using a **bilinear transformation** from the \\(s\text{-domain}\\) to the \\(z\text{-domain}\\), but there can be advantages in being able to design directly in discrete time. + + +##### Anti-windup {#anti-windup} + +In \\(\hinf\\) loop-shaping the pre compensator weight \\(W\_1\\) would normally include integral action in order to reject low frequency disturbances acting on the system. 
+However, in the case of actuator saturation, the integrators continue to integrate their input and hence cause **windup** problems. +An anti-windup scheme is therefore required on the weighting function \\(W\_1\\). +The approach we recommend is to implement the weight \\(W\_1\\) in its self-conditioned or Hanus form. +Let the weight \\(W\_1\\) have a realization +\\[ W\_1 \triangleq \left[ \begin{array}{c|c} + A\_w & B\_w \\ \hline + C\_w & D\_w +\end{array} \right] \\] +and let \\(u\\) be the input to the plant actuators and \\(u\_s\\) the input to the shaped plant. +Then \\(u = W\_1 u\_s\\). +When implemented in Hanus form, the expression for \\(u\\) becomes +\\[ u = \left[ \begin{array}{c|cc} + A\_w - B\_wD\_w^{-1}C\_w & 0 & B\_wD\_w^{-1} \\ \hline + C\_w & D\_w & 0 +\end{array} \right] \begin{bmatrix} + u\_s \\ u\_a +\end{bmatrix} \\] +where \\(u\_a\\) is the **actual plant input**, that is the measurement at the **output of the actuators** which therefore contains information about possible actuator saturation. + +The situation is illustrated in Fig. [fig:weight_anti_windup](#fig:weight_anti_windup), where the actuators are each modeled by a unit gain and a saturation. + + + +{{< figure src="/ox-hugo/skogestad07_weight_anti_windup.png" caption="Figure 56: Self-conditioned weight \\(W\_1\\)" >}} + +The Hanus form prevents windup by keeping the states of \\(W\_1\\) consistent with the actual plant input at all times. +When there is no saturation, \\(u\_a=u\\), the dynamics of \\(W\_1\\) remains unaffected. +But when \\(u\_a\neq u\\), the dynamics are inverted and driven by \\(u\_a\\) so that the states remain consistent with the actual plant input \\(u\_a\\). +Notice that such an implementation requires \\(W\_1\\) to be invertible and minimum phase. + + +##### Bumpless transfer {#bumpless-transfer} + +When multi-mode switched controller is designed, one should ensure **smooth transition from one controller to the other** (bumpless transfer). 
+It was found useful to condition the reference models and the observers in each of the controllers. +When on-line, the observer state evolves according to +\\[ \dot{\hat{x}}\_s = A\_s \hat{x}\_s + H\_s (C\_s \hat{x}\_s - y\_s) + B\_s u\_s \\] +but when off-line, the state equation becomes +\\[ \dot{\hat{x}}\_s = A\_s \hat{x}\_s + H\_s (C\_s \hat{x}\_s - y\_s) + B\_s u\_{as} \\] +where \\(u\_{as}\\) is the actual input to the shaped plant governed by the on-line controller. + +Doing so ensures that the inputs to the shaped plant for the off-line controller follow the actual shaped plant input \\(u\_{as}\\) given by the on-line controller. +The observer-based structure of the \\(\mathcal{H}\_\infty\\) loop-shaping controller is then helpful for such a technique. + + +### Conclusion {#conclusion} + +Several methods and techniques for controller design have been described. +The emphasis has been on \\(\hinf\\) loop shaping which is easy to apply and works well in practice. +It combines classical loop-shaping ideas with an effective method for robustly stabilizing the feedback loop.
+ +For complex problems, such as unstable plants with multiple gain crossover frequencies, it may not be easy to decide on a desired loop shape. +In which case, we would suggest doing an initial LQG design (with simple weights) and using the resulting loop shape as the desired one for the \\(\hinf\\) loop shaping.
+ +An alternative to \\(\hinf\\) loop shaping is a standard \\(\hinf\\) design with a stacked cost function such as in \\(S/KS\\) mixed-sensitivity optimization. +In this approach, \\(\hinf\\) optimization is used to shape two or sometimes three closed-loop transfer functions. +However, with more functions, the shaping becomes increasingly difficult for the designer.
+ +In other design situations where there are several performance objectives, it may be more appropriate to follow a signal-based \\(\htwo\\) or \\(\hinf\\) approach. +But again, the problem formulations become so complex that the designer has little direct influence on the design.
+ +After a design, the resulting controller should be analyzed with respect to robustness and tested using nonlinear simulations. +For the study of robustness, we recommend \\(\mu\text{-analysis}\\). If the design is not robust, then the weights should be modified. +Sometimes, one might consider synthesizing a \\(\mu\text{-optimal}\\) controller, but this complexity is rarely necessary in practice. +Moreover, one should be careful about combining controller synthesis and analysis into a single step. + + +## Controller Structure Design {#controller-structure-design} + + + + +### Introduction {#introduction} + +In previous sections, we considered the general problem formulation in Fig. [fig:general_control_names_bis](#fig:general_control_names_bis) and stated that the controller design problem is to find a controller \\(K\\) which, based on the information in \\(v\\), generates a control signal \\(u\\) which counteracts the influence of \\(w\\) on \\(z\\), thereby minimizing the closed loop norm from \\(w\\) to \\(z\\). + + + +{{< figure src="/ox-hugo/skogestad07_general_control_names_bis.png" caption="Figure 57: General Control Configuration" >}} + +In this chapter we are concerned with the **structural decisions** associated with the following selection tasks of control structure design: + +- **Controlled outputs**: What are the variables \\(z\\)? +- **Manipulations and measurements**: What are the variable sets \\(u\\) and \\(v\\)? +- **Control configuration**: What is the structure of \\(K\\)? +- **Controller type**: What algorithm is used for \\(K\\)? + +The distinction between the terms control _structure_ and control _configuration_ is significant. +The _control structure_ refers to all structural decisions included in the design of a control system. +On the other hand, the _control configuration_ refers only to the structuring of the controller \\(K\\) itself.
+ +Ideally, the tasks involved in designing a complete control system are performed sequentially; first a "top down" selection of controller outputs, measurements and inputs, and then a "bottom up" design of the control system in which the selection of the control configuration is the most important decision. +However, in practice the tasks are closely related so the procedure may involve iteration.
+ +One important reason for decomposing the control system into a specific _control configuration_ is that it may **allow for simple tuning** of the sub-controllers **without the need for a detailed plant model** describing the dynamics and interactions in the process. +Multivariable centralized controllers may always outperform decomposed (decentralized) controllers, but this performance gain must be traded off against the cost of obtaining and maintaining a sufficiently detailed plant model.
+ +The number of possible control structures is usually very large. +Fortunately, we can often from physical insight obtain a reasonable choice of controlled outputs, measurements and manipulated inputs. + + +### Optimization and Control {#optimization-and-control} + +The selection of controlled outputs involves selecting the variables \\(y\\) to be controlled at given reference values \\(y \approx r\\). +The reference value \\(r\\) is usually set at some higher layer in the control hierarchy which is often divided into two layers: + +- **Optimization layer**: computes the desired reference commands \\(r\\) +- **Control layer**: implements these commands to achieve \\(y \approx r\\) + +Additional layers are possible, as is illustrated in Fig. [fig:control_system_hierarchy](#fig:control_system_hierarchy) which shows a typical control hierarchy for a chemical plant. + + + +{{< figure src="/ox-hugo/skogestad07_system_hierarchy.png" caption="Figure 58: Typical control system hierarchy in a chemical plant" >}} + +In general, the information flow in such a control hierarchy is based on the higher layer sending reference values (setpoints) to the layer below, and the layer below reporting back any problems achieving this (see Fig. [fig:optimize_control_b](#fig:optimize_control_b)). +There is usually a time scale separation between the layers which means that the **setpoints**, as viewed from a given layer, are **updated only periodically**.
+ +The optimization tends to be performed open-loop with limited use of feedback. On the other hand, the control layer is mainly based on feedback information. +The **optimization is often based on nonlinear steady-state models**, whereas we often use **linear dynamic models in the control layer**.
+ +From a theoretical point of view, the optimal performance is obtained with a **centralized optimizing controller**, which combines the two layers of optimizing and control (see Fig. [fig:optimize_control_c](#fig:optimize_control_c)). +All control actions in such an ideal control system would be perfectly coordinated and the control system would use on-line dynamic optimization based on a nonlinear dynamic model of the complete plant. +However, this solution is normally not used for a number of reasons, including the cost of modeling, the difficulty of controller design, maintenance, robustness problems and the lack of computing power.
+ Table 6: + Alternative structures for optimization and control +
+ +| ![](/ox-hugo/skogestad07_optimize_control_a.png) | ![](/ox-hugo/skogestad07_optimize_control_b.png) | ![](/ox-hugo/skogestad07_optimize_control_c.png) | +|--------------------------------------------------|--------------------------------------------------------------------------------|-------------------------------------------------------------| +| Open loop optimization | Closed-loop implementation with separate control layer | Integrated optimization and control | + + +### Selection of Controlled Outputs {#selection-of-controlled-outputs} + +A **controlled output** is an output variable (usually measured) with an associated control objective (usually a reference value). +In many cases, it is clear from a physical understanding of the process what the controlled outputs should be. +In other cases, it is less obvious because each control objective may not be associated with a measured output variable.
+ +In the following, we let \\(y\\) denote the selected controller outputs in the control layer. +Two distinct questions arise: + +1. What variables \\(y\\) should be selected? +2. What is the optimal reference value \\(y\_\text{opt}\\)? + +For the first problem, we make the following assumptions: + +1. The overall goal can be quantified in terms of a **scalar cost function** \\(J\\) which we want to minimize +2. For a given disturbance \\(d\\), there exists an optimal value \\(u\_\text{opt}(d)\\) and corresponding value \\(y\_\text{opt}(d)\\) which minimizes the cost function \\(J\\) +3. The reference values \\(r\\) for the controlled outputs \\(y\\) should be constant, i.e. \\(r\\) should be independent of the disturbances \\(d\\) + +The system behavior is a function of the independent variables \\(u\\) and \\(d\\): \\(J = J(u, d)\\). +For a given disturbance \\(d\\) the optimal value of the cost function is + +\begin{equation} + J\_\text{opt}(d) \triangleq J(u\_\text{opt}, d) = \min\_u J(u, d) +\end{equation} + +In practice \\(u \neq u\_\text{opt}\\), and we have a loss which can be quantified by \\(L = J - J\_\text{opt}\\). +A reasonable objective for selecting controlled outputs \\(y\\) is to minimize some norm of the loss, for instance the worst-case loss: + +\begin{equation} + \Phi \triangleq \max\_{d \in \mathcal{D}} |\underbrace{J(u, d) - J(u\_\text{opt}, d)}\_{L}| +\end{equation} + +where \\(\mathcal{D}\\) is the set of possible disturbances. + + +#### Direct Evaluation of Cost {#direct-evaluation-of-cost} + +The "brute force" approach for selecting controlled variables is to evaluate the loss for alternative sets of controlled variable. +By solving the non linear equations, we evaluate directly the cost function \\(J\\) for various disturbances \\(d\\). +The set of controlled outputs with smallest worst case or average value of \\(J\\) is then preferred. 
+This approach may be time consuming because the solution of the nonlinear equations must be repeated for each candidate set of controlled outputs. + + +#### Linear Analysis {#linear-analysis} + +Consider the loss \\(L = J(u,d) - J\_\text{opt}(d)\\) where \\(d\\) is a fixed disturbance. +We make the following additional assumptions: + +
+
+1. The cost function \\(J\\) is smooth (twice differentiable)
+2. The optimization problem is unconstrained.
+   If it is optimal to keep some variable at a constraint, then we assume that this is implemented and consider the remaining unconstrained problem
+3. The dynamics of the problem can be neglected, that is, **we consider the steady-state control and optimization**
+ +For a fixed \\(d\\) we may express \\(J(u, d)\\) in terms of a Taylor series expansion in \\(u\\) around the optimal point. +By neglecting terms of third order and higher, we obtain: +\\[ J(u, d) = J\_\text{opt}(d) + \frac{1}{2} (u - u\_\text{opt}(d))^T \left(\frac{\partial^2 J}{\partial u^2}\right)\_\text{opt} (u - u\_\text{opt}(d)) \\] +This quantifies how \\(u-u\_\text{opt}\\) affects the cost function. +For a fixed \\(d\\), we have: \\(y - y\_\text{opt} = G (u - u\_\text{opt})\\) where \\(G\\) is the steady state gain matrix. +Thus, we get: +\\[ J - J\_\text{opt} \approx \frac{1}{2} \big(G^{-1}(y-y\_\text{opt})\big)^T \left(\frac{\partial^2 J}{\partial u^2}\right)\_\text{opt} G^{-1} (y - y\_\text{opt}) \\] + +We conclude that we should select \\(y\\) such that: + +1. \\(G^{-1}\\) is small: the inputs have a large effect on \\(y\\) +2. \\(e\_\text{opt} = r - y\_\text{opt}(d)\\) is small: its optimal value \\(y\_\text{opt}(d)\\) depends only weakly on the disturbances and other changes +3. \\(e = y - r\\) is small: it is easy to keep the control error \\(e\\) small + +Note that \\(\bar{\sigma}(G^{-1}) = 1/\underline{\sigma}(G)\\) and so **we want the smallest singular value of the steady state gain matrix to be large**. + +As this depends on scaling, we should first **scale the outputs** such that the expected magnitude of \\(y\_i - y\_{i\_\text{opt}}\\) is similar in magnitude for each output, and **scale the inputs** such that the effect of a given deviation \\(u\_j - u\_{j\_\text{opt}}\\) on the cost function \\(J\\) is similar for each input.
+ +
+
+ +The use of the minimum singular value to select controlled outputs may be summarized in the following procedure: + +1. From a (nonlinear) model compute the optimal parameters (inputs and outputs) for various conditions (disturbances, operating points). + This yields a "look-up" table for optimal parameter values as a function of the operating conditions +2. From this data, obtain for each candidate output the variation in its optimal value + \\[ v\_i = \frac{(y\_{i\_{\text{opt,max}}} - y\_{i\_{\text{opt,min}}})}{2} \\] +3. Scale the candidate outputs such that for each output the sum of the magnitudes of \\(v\_i\\) and the control error (\\(e\_i\\), including measurement noise \\(n\_i\\)) is similar (e.g. \\(|v\_i| + |e\_i| = 1\\)) +4. Scale the inputs such that a unit deviation in each input from its optimal value has the same effect on the cost function \\(J\\) +5. Select as candidates those sets of controlled outputs which corresponds to a large value of \\(\underline{\sigma}(G)\\). + \\(G\\) is the transfer function for the effect of the scaled inputs on the scaled outputs + +
+ + +#### Summary {#summary} + +Generally, the optimal values of all variables will change with time during operation. +If the loss imposed by keeping constant setpoints is acceptable, then we have self-optimizing control. +The objective of the control layer is then to keep the controlled outputs at their reference values (which are computed by the optimization layer).
+ +The controlled outputs are often measured, but we may also estimate their values based on other measured variables. +We may also use other measurements to improve the control of the controlled outputs, for example, by use of cascade control. +Thus, the selections of controlled and measured outputs are two separate issues. + + +### Selection of Manipulations and Measurements {#selection-of-manipulations-and-measurements} + +We are here concerned with the variable sets \\(u\\) and \\(v\\) in Fig. [fig:general_control_names_bis](#fig:general_control_names_bis). +Note that **the measurements** \\(v\\) used by the controller **are in general different from the controlled variables** \\(z\\) because we may not be able to measure all the controlled variables and we may want to measure and control additional variables in order to: + +- Stabilize the plant, or more generally change its dynamics +- Improve local disturbance rejection + + +##### Stabilization {#stabilization} + +We usually start the controller design by designing a lower-layer controller to stabilize the plant. +The issue is then: which outputs and inputs should be used for stabilization? +A reasonable objective is to minimize the required input usage of the stabilizing control system. + + +##### Local disturbance rejection {#local-disturbance-rejection} + +For measurements, the rule is generally to select those which have a **strong relationship with the controlled outputs**, or which may **quickly detect a major disturbance**. + +The selected manipulations should have a **large effect on the controlled outputs** and should be located "close" (in terms of dynamic response) to the outputs and measurements.
+ +To evaluate the combinations of manipulations and measurements, one may perform an **input-output controllability analysis** for each combination (e.g. consider the minimum singular values, RHP-zeros, interactions, etc). +A more involved approach would be to perform a achievable robust performance analysis. +An even more involved (and exact) approach would be to synthesize controllers for optimal robust performance for each candidate combination. +However, the number of combination has a combinatorial growth and the analysis may become very time-consuming. + + +### RGA for Non-Square Plant {#rga-for-non-square-plant} + +A simple but effective tool for selecting inputs and outputs, which avoids to combinatorial problem is the **Relative Gain Array** (RGA) of the "big" transfer matrix \\(G\_\text{all}\\) with all candidates inputs and outputs included: + +\begin{equation} + \tcmbox{\Lambda = G\_{\text{all}} \times G\_{\text{all}}^{\dagger^T}} +\end{equation} + +Essentially, one may consider not using those manipulations \\(u\\) corresponding to columns in the RGA where the sum of the elements is much smaller than 1. + +Similarly, one may consider not using those outputs \\(v\\) corresponding to rows in the RGA where the sum of the elements is much small than 1. + + +### Control Configuration Elements {#control-configuration-elements} + +We now assume that the measurements, manipulations and controlled outputs are fixed. +The available synthesis theories presented in this book result in a _multivariable controller_ \\(K\\) which connects all available measurements \\(v\\) with all available manipulations \\(u\\): +\\[ u = K v \\] +However, such a "big" controller may not be desirable. + +
+
+ +We define the **control configuration** to be the restrictions imposed on the overall controller \\(K\\) by decomposing it into a set of **local controllers** with predetermined links and with a possibly predetermined design sequence where subcontrollers are designed locally. + +
+ +Some elements used to build up a specific control configuration are: + +- **Cascade controllers**. The output from one controller is the input to another +- **Decentralized controllers**. The control system consists of independent feedback controllers which interconnect a subset of the output measurements with a subset of the manipulated inputs. + These subsets should not be used by any other controller +- **Feedforward elements**. Link measured disturbances and manipulated inputs +- **Decoupling elements**. Link one set of manipulated inputs with another set of manipulated inputs. + They are used to improve the performance of decentralized control systems. +- **Selectors**: used to select for control, depending on the conditions of the system, a subset of the manipulated inputs or a subset of the outputs + +In addition to restrictions on the structure of \\(K\\), we may impose restrictions on **in which sequence the subcontrollers are designed**. +For most decomposed control systems, we design the controllers sequentially, starting with the "fast" or "inner" or "lower-layer" control loops.
+ +The choice of control configuration leads to two different ways of partitioning the control system: + +- **Vertical decomposition**. This usually results from a sequential design of the control system +- **Horizontal decomposition**. This usually involves a set of independent decentralized controllers + +Of course, a **performance loss** is inevitable if we decompose the control system. +For example, if we select a poor configuration at the lower control layer, then this may pose fundamental limitations on the achievable performance (RHP zeros, strong interactions, etc). + + +#### Cascade Control Systems {#cascade-control-systems} + +We here use SISO controllers of the form + +\begin{equation} + u\_i = K\_i(s) (r\_i - y\_i) +\end{equation} + +where \\(K\_i(s)\\) is a scalar. +Then when a SISO control loop is closed, we lose the input \\(u\_i\\) as a degree-of-freedom but the reference \\(r\_i\\) becomes a new degree-of-freedom.
+ +A cascade control structure results when either of the following two situations arise: + +- The reference \\(r\_i\\) is an output from another controller. + This is the **conventional cascade control** (Fig. [fig:cascade_extra_meas](#fig:cascade_extra_meas)) +- The "measurement" \\(y\_i\\) is an output from another controller. + This is referred to as **input resetting** (Fig. [fig:cascade_extra_input](#fig:cascade_extra_input)) + + +
+ Table 7: + Cascade Implementations +
+ +| ![](/ox-hugo/skogestad07_cascade_extra_meas.png) | ![](/ox-hugo/skogestad07_cascade_extra_input.png) | +|-------------------------------------------------------|---------------------------------------------------| +| Extra measurements \\(y\_2\\) | Extra inputs \\(u\_2\\) | + + +#### Cascade Control: Extra Measurements {#cascade-control-extra-measurements} + +Let \\(u\\) be the manipulated input, \\(y\_1\\) the controlled outputs and \\(y\_2\\) the extra measurement. +In many cases, we may use \\(y\_2\\) to provide **local disturbance rejection**, **linearization**, or to **reduce the effect of measurement noise**. +For example, velocity feedback is frequently used in mechanical systems. + + +##### Centralized (parallel) implementation {#centralized--parallel--implementation} + +A centralized implementation where \\(K\\) is a 2-inputs-1-output controller may be written + +\begin{align\*} + u &= K(s)(r - y) \\\\\\ + u &= K\_{11}(s)(r\_1 - y\_1) + K\_{12}(s)(r\_2 - y\_2) +\end{align\*} + +where in most cases \\(r\_2 = 0\\) since we do not have a degree-of-freedom to control \\(y\_2\\). + + +##### Cascade implementation {#cascade-implementation} + +To obtain an implementation with two SISO controllers, we may cascade the controllers as illustrated in Fig. [fig:cascade_extra_meas](#fig:cascade_extra_meas): + +\begin{align\*} + r\_2 &= K\_1(s)(r\_1 - y\_1) \\\\\\ + u\_2 &= K\_2(s)(r\_2 - y\_2),\ r\_2 = \hat{u}\_1 +\end{align\*} + +Note that the output \\(r\_2\\) from the slower primary controller \\(K\_1\\) is not a manipulated plant input, but rather the reference input to the faster secondary controller \\(K\_2\\). +Cascades based on measuring the actual manipulated variable (\\(y\_2 = u\_m\\)) are commonly used to **reduce uncertainty and non-linearity at the plant input**. + +In the general case (Fig. 
[fig:cascade_extra_meas](#fig:cascade_extra_meas)) \\(y\_1\\) and \\(y\_2\\) are not directly related to each other, and this is sometimes referred to as _parallel cascade control_. +However, it is common to encounter the situation in Fig. [fig:cascade_control](#fig:cascade_control) where the primary output \\(y\_1\\) depends directly on \\(y\_2\\) which is a special case of Fig. [fig:cascade_extra_meas](#fig:cascade_extra_meas). + +
+
+ +With reference to the special (but common) case of cascade control shown in Fig. [fig:cascade_control](#fig:cascade_control), the use of extra measurements is useful under the following circumstances: + +- The disturbance \\(d\_2\\) is significant and \\(G\_1\\) is non-minimum phase. + If \\(G\_1\\) is minimum phase, the input-output controllability of \\(G\_2\\) and \\(G\_1 G\_2\\) are the same and there is no fundamental advantage in measuring \\(y\_2\\) +- The plant \\(G\_2\\) has considerable uncertainty associated with it and the inner loop serves to remove the uncertainty. + The inner loop \\(L\_2 = G\_2 K\_2\\) removes the uncertainty if it is sufficiently fast and yields a transfer function \\((I + L\_2)^{-1} L\_2\\) close to \\(I\\) at frequencies where \\(K\_1\\) is active. + +
+ + + +{{< figure src="/ox-hugo/skogestad07_cascade_control.png" caption="Figure 59: Common case of cascade control where the primary output \\(y\_1\\) depends directly on the extra measurement \\(y\_2\\)" >}} + +In terms of design, it is recommended to first design \\(K\_2\\) to minimize the effect of \\(d\_2\\) on \\(y\_1\\) and then to design \\(K\_1\\) to minimize the effect of \\(d\_1\\) on \\(y\_1\\). + + +#### Cascade Control: Extra Inputs {#cascade-control-extra-inputs} + +In some cases we have more manipulated inputs than controlled outputs. +These may be used to improve control performance. + + +##### Centralized implementation {#centralized-implementation} + +A centralized implementation where \\(K\\) is a 1-input-2-outputs controller may be written +\\[ u\_1 = K\_{11}(s)(r-y); \quad u\_2 = K\_{21}(s)(r-y) \\] +Here two inputs are used to control one output. +We usually let \\(K\_{11}\\) have integral control whereas \\(K\_{21}\\) does not. +Then \\(u\_2(t)\\) will only be used for **transient control** and will return to \\(0\\) as \\(t \to \infty\\). + + +##### Cascade implementation {#cascade-implementation} + +To obtain an implementation with two SISO controllers we may cascade the controllers as shown in Fig. [fig:cascade_extra_input](#fig:cascade_extra_input). +We again let input \\(u\_2\\) take care of the **fast control** and \\(u\_1\\) of the **long-term control**. +The fast control loop is then +\\[ u\_2 = K\_2(s)(r - y) \\] +The objective of the other slower controller is then to use input \\(u\_1\\) to reset input \\(u\_2\\) to its desired value \\(r\_{u\_2}\\): +\\[ u\_1 = K\_1(s)(r\_{u\_2} - y\_1), \ y\_1 = u\_2 \\] +and we see that the output from the fast controller \\(K\_2\\) is the "measurement" for the slow controller \\(K\_1\\). + +The cascade implementation again has the **advantage of decoupling the design of the two controllers**. 
+It also shows more clearly that \\(r\_{u\_2}\\), the reference for \\(u\_2\\), may be used as a degree-of-freedom at higher layers in the control system. + +
+
+ +Consider the system in Fig. [fig:cascade_control_two_layers](#fig:cascade_control_two_layers) with two manipulated inputs (\\(u\_2\\) and \\(u\_3\\)), one controlled output (\\(y\_1\\) which should be close to \\(r\_1\\)) and two measured variables (\\(y\_1\\) and \\(y\_2\\)). +Input \\(u\_2\\) has a more direct effect on \\(y\_1\\) than does input \\(u\_3\\) (there is a large delay in \\(G\_3(s)\\)). +Input \\(u\_2\\) should only be used for transient control as it is desirable that it remains close to \\(r\_3 = r\_{u\_2}\\). +The extra measurement \\(y\_2\\) is closer than \\(y\_1\\) to the input \\(u\_2\\) and may be useful for detecting disturbances affecting \\(G\_1\\). + +Controller \\(K\_1\\) controls the primary output \\(y\_1\\) at its reference \\(r\_1\\) by adjusting the "input" \\(\hat{u}\_1\\), which is the reference value for \\(y\_2\\). +Controller \\(K\_2\\) controls the secondary output \\(y\_2\\) using input \\(u\_2\\). +Finally, controller \\(K\_3\\) manipulates \\(u\_3\\) slowly in order to reset input \\(u\_2\\) to its desired value \\(r\_3\\). +We would probably tune the three controllers in the order \\(K\_2\\), \\(K\_3\\), and \\(K\_1\\). + +
+ + + +{{< figure src="/ox-hugo/skogestad07_cascade_control_two_layers.png" caption="Figure 60: Control configuration with two layers of cascade control" >}} + + +#### Selectors {#selectors} + + +##### Slip-range control for extra input {#slip-range-control-for-extra-input} + +Sometimes the input constraints make it necessary to add a manipulated input. +In this case the control range is often split such that, for example, \\(u\_1\\) is used for control when \\(y \in [y\_\text{min}, y\_1]\\) and \\(u\_2\\) is used when \\(y \in [y\_1, y\_\text{max}]\\). + + +##### Selector for too few inputs {#selector-for-too-few-inputs} + +A completely different situation occurs if there are fewer inputs than outputs. +In such case, we cannot control all the outputs independently, so we either need to control all the outputs in some average manner, or we need to make a choice about which outputs are the most important to control. +Selectors are often used for the latter option. + + +#### Why use Cascade and Decentralized Control? {#why-use-cascade-and-decentralized-control} + +Decomposed control configuration can easily become quite complex and difficult to maintain and understand. +It may therefore be both simpler and better in terms of control performance to set up the controller design problem as an optimization problem and let the computer do the job, resulting in a **centralized multivariable controller**.
+ +However, there are a **number of reasons why cascade and decentralized control are used in practice**. +The most important one is the **cost associated with obtaining good plant models**, which are a prerequisite for applying multivariable control. +Since cascade and decentralized control systems depend more strongly on feedback rather than on models as their source of information, it is usually more important (relative to centralized multivariable control) that the fast control loops be tuned to respond quickly. + +Cascade and decentralized control are often easier to understand, their tuning parameters have a direct and "localized" effect, and they tend to be **less sensitive to uncertainty**.
+ +The **main challenge** is then to find a control configuration which allows the controllers to be tuned independently based on a minimum of model information. +To be able to tune the controllers independently, we must require that the loops interact only to a limited extent. +For example, one desirable property is that the steady-state gain from \\(u\_i\\) to \\(y\_i\\) in an "inner" loop does not change too much as outer loops are closed. + + +### Hierarchical and Partial Control {#hierarchical-and-partial-control} + + +#### Partial Control {#partial-control} + +
+
+ +Partial control involves controlling only a subset of the outputs for which there is a control objective. + +
+ +We divide the outputs \\(y\\) into two classes: + +- \\(y\_1\\) - (temporarily) uncontrolled output +- \\(y\_2\\) - (locally) measured and controlled output + +We also subdivide the available manipulated inputs \\(u\\): + +- \\(u\_2\\) - inputs used for controlling \\(y\_2\\) +- \\(u\_1\\) - remaining inputs + +Four applications of partial control are: + +1. **Sequential design of decentralized controllers.** + Both \\(y\_1\\) and \\(y\_2\\) have an associated control objective. + First, a controller \\(K\_2\\) is designed to control \\(y\_2\\). + Then, a controller \\(K\_1\\) may be designed for the remaining outputs. +2. **Sequential design of conventional cascade control.** + The outputs \\(y\_2\\) are additional measured variables which are not important variables in themselves. + The reason for controlling \\(y\_2\\) is to improve the control of \\(y\_1\\). + The references \\(r\_2\\) are used as degrees-of-freedom for controlling \\(y\_1\\). +3. **"true" partial control.** + Both \\(y\_1\\) and \\(y\_2\\) have an associated control objective. + We consider whether by controlling only the subset \\(y\_2\\) we can indirectly achieve acceptable control of \\(y\_1\\). +4. **Indirect control.** + The outputs \\(y\_1\\) have an associated control objective but are not measured. + Instead, we aim at indirectly controlling \\(y\_1\\) by controlling the secondary measured variables \\(y\_2\\). + +The table [tab:partial_control](#tab:partial_control) shows clearly the differences between the four applications of partial control. +In all cases, there is a control objective associated with \\(y\_1\\) and a feedback involving measurement and control of \\(y\_2\\) and we want: + +- The effect of disturbances on \\(y\_1\\) to be small (when \\(y\_2\\) is controlled) +- The control of \\(y\_2\\) using \\(u\_2\\) to be (dynamically) easy + + +
+ Table 8: + Applications of partial control +
+ +| Control | Meas. and control of \\(y\_1\\)? | Control objective for \\(y\_2\\)? | +|---------------------|----------------------------------|-----------------------------------| +| Sequ. decentralized | Yes | Yes | +| Sequ. cascade | Yes | No | +| "True" partial | No | Yes | +| Indirect | No | No | + +By partitioning the inputs and outputs, the overall model \\(y = G u\\) can be written + +\begin{equation} + \begin{aligned} + y\_1 &= G\_{11} u\_1 + G\_{12} u\_2 + G\_{d1} d\\\\\\ + y\_2 &= G\_{21} u\_1 + G\_{22} u\_2 + G\_{d2} d + \end{aligned} +\end{equation} + +Assume now that feedback control \\(u\_2 = K\_2(r\_2 - y\_2 - n\_2)\\) is used for the "secondary" subsystem involving \\(u\_2\\) and \\(y\_2\\) (Fig. [fig:partial_control](#fig:partial_control)). +We get: + +\begin{equation} + \begin{aligned} + y\_1 = &(G\_{11} - G\_{12}K\_2(I + G\_{22}K\_2)^{-1}G\_{21})u\_1 \\\\\\ + & + (G\_{d1} - G\_{12}K\_2(I + G\_{22}K\_2)^{-1}G\_{d2})d \\\\\\ + & + G\_{12} K\_2 (I + G\_{22}K\_2)^{-1}(r\_2 - n\_2) + \end{aligned} +\end{equation} + + + +{{< figure src="/ox-hugo/skogestad07_partial_control.png" caption="Figure 61: Partial Control" >}} + + +##### Tight control of \\(y\_2\\) {#tight-control-of--y-2} + +In some cases, we can assume that the control of \\(y\_2\\) is fast compared to the control of \\(y\_1\\) so we may let \\(K\_2 \to \infty\\) to get: +\\[ u\_2 = -G\_{22}^{-1} G\_{d2} d - G\_{22}^{-1} G\_{21} u\_1 + G\_{22}^{-1} y\_2 \\] + +The dynamics of the system becomes: + +\begin{equation} + \begin{aligned} + y\_1 = &\underbrace{(G\_{11} - G\_{12} G\_{22}^{-1} G\_{21})}\_{\triangleq P\_u} u\_1 \\\\\\ + & + \underbrace{(G\_{d1} - G\_{12} G\_{22}^{-1} G\_{d2})}\_{\triangleq P\_d} d + \underbrace{G\_{12} G\_{22}^{-1}}\_{\triangleq P\_r} \underbrace{(r\_2 - e\_2)}\_{y\_2} + \end{aligned} +\end{equation} + +where + +- \\(P\_d\\) is called the **partial disturbance gain**, which is the disturbance gain for a system under perfect partial control +- \\(P\_u\\) is the 
effect of \\(u\_1\\) on \\(y\_1\\) with \\(y\_2\\) perfectly controlled + +The obtained dynamics is independent of \\(K\_2\\), but this only applies at frequencies where \\(y\_2\\) is tightly controlled. + + +#### Hierarchical Control and Sequential Design {#hierarchical-control-and-sequential-design} + +A **hierarchical control system** results when we design the subcontrollers in a **sequential manner**, usually **starting with the fast loops**. +This means that the controller at some higher layer in the hierarchy is designed based on a partially controlled plant. + +The idea is to first implement a local lower-layer control system for controlling the outputs \\(y\_2\\). +Next, with this lower-layer in place, we design a controller \\(K\_1\\) to control \\(y\_1\\).
+ +The objectives for this hierarchical decomposition are: + +- to allow for simple or even on-line tuning of \\(K\_2\\) +- to allow the use of longer sampling intervals for \\(K\_1\\) +- to allow simple models when designing \\(K\_1\\) +- to "stabilize" the plant using \\(K\_2\\) such that it is amenable to manual control + +
+
+ +The selection of \\(u\_2\\) and \\(y\_2\\) for use in the lower-layer control system can be done with the following criteria: + +- The lower-layer must quickly implement the setpoints computed by the higher layers, that is, the input-output controllability of the subsystem involving the use of \\(u\_2\\) to control \\(y\_2\\) should be good (consider \\(G\_{22}\\) and \\(G\_{d2}\\)) +- The control of \\(y\_2\\) using \\(u\_2\\) should provide local disturbance rejection, that is, it should minimize the effect of disturbances on \\(y\_1\\) +- The control of \\(y\_2\\) using \\(u\_2\\) should not impose unnecessary control limitations (RHP-zero, ill-conditioning, etc.) on the remaining control problem which involves using \\(u\_1\\) to control \\(y\_1\\) + +
+ + +##### Sequential design of cascade control systems {#sequential-design-of-cascade-control-systems} + +Consider the conventional cascade control system in Fig. [fig:cascade_extra_meas](#fig:cascade_extra_meas) where we have additional "secondary" measurements \\(y\_2\\) with no associated control objective, and the objective is to improve the control of \\(y\_1\\) by locally controlling \\(y\_2\\). +The idea is that this should reduce the effect of disturbances and uncertainty on \\(y\_1\\). + +From [eq:partial_control](#eq:partial_control), it follows that we should select \\(y\_2\\) and \\(u\_2\\) such that \\(\\|P\_d\\|\\) is small and at least smaller than \\(\\|G\_{d1}\\|\\). +These arguments particularly apply at high frequencies. +More precisely, we want the input-output controllability of \\([P\_u\ P\_r]\\) with disturbance model \\(P\_d\\) to be better that of the plant \\([G\_{11}\ G\_{12}]\\) with disturbance model \\(G\_{d1}\\). + + +#### "True" Partial Control {#true-partial-control} + +We here consider the case where we attempt to leave a set of primary outputs \\(y\_1\\) uncontrolled. +This may be possible in cases where the outputs are correlated such that controlling the outputs \\(y\_2\\) indirectly gives acceptable control of \\(y\_1\\). + +
+
+ +A set of outputs \\(y\_1\\) may be left uncontrolled only if the effects of all disturbances (including \\(r\_2\\)) on \\(y\_1\\), as expressed by the elements in the corresponding partial disturbance gain matrix \\(P\_d\\) are less than \\(1\\) in magnitude at all frequencies. + +
+ +To evaluate the feasibility of partial control, one must for each choice of \\(y\_2\\) and \\(u\_2\\), rearrange the system as in [eq:partial_control_partitioning](#eq:partial_control_partitioning) and [eq:partial_control](#eq:partial_control), and compute \\(P\_d\\) using [eq:tight_control_y2](#eq:tight_control_y2). + + +#### Measurement Selection for Indirect Control {#measurement-selection-for-indirect-control} + +Assume the overall goal is to keep some variable \\(y\_1\\) at a given value \\(r\_1\\), e.g. our objective is to minimize \\(J = \\|y\_1 - r\_1\\|\\). +We assume that we cannot measure \\(y\_1\\), and instead we attempt to achieve our goal by controlling \\(y\_2\\) at a constant value \\(r\_2\\). +For small changes, we may assume linearity and write: + +\begin{align\*} + y\_1 &= G\_1 u + G\_{d1} d\\\\\\ + y\_2 &= G\_2 u + G\_{d2} d +\end{align\*} + +With feedback control of \\(y\_2\\) we get \\(y\_2 = r\_2 + e\_2\\) where \\(e\_2\\) is the control error. +From the above two equations, we obtain +\\[ y\_1 = (G\_{d1} - G\_1 G\_2^{-1} G\_{d2})d + G\_1 G\_2^{-1} (r\_2 + e\_2) \\] + +With \\(e\_2 = 0\\) and \\(d = 0\\) this gives \\(y\_1 = G\_1 G\_2^{-1} r\_2\\), so \\(r\_2\\) must be chosen such that +\\[ r\_1 = G\_1 G\_2^{-1} r\_2 \\] + +The control error in the primary output is then + +\begin{equation} + y\_1 - r\_1 = \underbrace{(G\_{d1} - G\_1 G\_2^{-1} G\_{d2})}\_{P\_d} d + \underbrace{G\_1 G\_2^{-1}}\_{P\_r} e\_2 +\end{equation} + +To minimize \\(J\\), we should therefore select controlled outputs such that \\(\\|P\_d d\\|\\) and \\(\\|P\_r e\_2\\|\\) are small. +Note that \\(P\_d\\) depends on the scaling of \\(d\\) and \\(y\_1\\). +Also the magnitude of \\(e\_2\\) depends on the choice of outputs \\(y\_2\\). + +
+
+ +Scale the disturbances \\(d\\) to be of magnitude 1, and scale the outputs \\(y\_2\\) so that the expected control error \\(e\_2\\) (measurement noise) is of magnitude 1 for each outputs. +Then to minimize the control error for the primary output, \\(J = \\|y\_1 - r\_1\\|\\), we should select sets of controlled outputs which minimizes \\(\\|[ P\_d \ P\_r]\\|\\). + +
+ + +### Decentralized Feedback Control {#decentralized-feedback-control} + +In this section, \\(G(s)\\) is a square plant which is to be controlled using a diagonal controller (Fig. [fig:decentralized_diagonal_control](#fig:decentralized_diagonal_control)). + + + +{{< figure src="/ox-hugo/skogestad07_decentralized_diagonal_control.png" caption="Figure 62: Decentralized diagonal control of a \\(2 \times 2\\) plant" >}} + +The design of **decentralized diagonal control systems** involves two steps: + +1. The choice of pairing (control configuration selection) +2. The design of each controller \\(k\_i(s)\\) + +\\[ K(s) = \text{diag}\\{k\_i(s)\\} = \begin{bmatrix} + k\_1(s) & & & \\\\\\ + & k\_2(s) & & \\\\\\ + & & \ddots & \\\\\\ + & & & k\_m(s) +\end{bmatrix} \\] + + +#### Notations for decentralized diagonal control {#notations-for-decentralized-diagonal-control} + +\\(G(s)\\) denotes a square \\(m \times m\\) plant with elements \\(g\_{ij}\\). +\\(G^{ij}(s)\\) denotes the remaining \\((m-1) \times (m-1)\\) plant obtained by removing row \\(i\\) and column \\(j\\) in \\(G(s)\\). +We introduce: +\\[ \tilde{G} \triangleq \text{diag}\\{g\_{ii}\\} = \begin{bmatrix} + g\_{11} & & & \\\\\\ + & g\_{22} & & \\\\\\ + & & \ddots & \\\\\\ + & & & g\_{mm} \\\\\\ +\end{bmatrix} \\] +The loop transfer function in loop \\(i\\) is denoted \\(L\_i = g\_{ii} k\_i\\). + + +#### RGA as a Measure of the Interaction for Decentralized Control {#rga-as-a-measure-of-the-interaction-for-decentralized-control} + +Let \\(u\_j\\) and \\(y\_i\\) denote a particular input and output for the multivariable plant \\(G(s)\\) and assume that our task is to use \\(u\_j\\) to control \\(y\_i\\). +There are two extreme cases: + +- **Other loops open**: \\(u\_k = 0, \forall k \neq j\\) +- **Other loops closed**: \\(y\_k = 0, \forall k \neq i\\). 
+ It is assumed that the other loops are closed with perfect control which is a good approximation at frequencies within the bandwidth of each loop + +We now evaluate the effect \\(\partial y\_i / \partial u\_j\\) for the two cases: + +\begin{subequations} + \begin{align} + & \left( \frac{\partial y\_i}{\partial u\_j} \right)\_{u\_k = 0, k \neq j} = g\_{ij} = [G]\_{ij}\\\\\\ + & \left( \frac{\partial y\_i}{\partial u\_j} \right)\_{y\_k = 0, k \neq i} \triangleq \hat{g}\_{ij} = 1/[G^{-1}]\_{ji} + \end{align} +\end{subequations} + +The ratio between the gains corresponding to the two extreme cases is a useful **measure of interactions** and is defined as the \\(ij\text{'th}\\) **relative gain**: + +\begin{equation} + \tcmbox{\lambda\_{ij} \triangleq \frac{g\_{ij}}{\hat{g}\_{ij}} = [G]\_{ij}[G^{-1}]\_{ji}} +\end{equation} + +The **Relative Gain Array** (RGA) is the corresponding matrix of relative gains: + +\begin{equation} + \tcmbox{\Lambda(G) = G \times (G^{-1})^T} +\end{equation} + +where \\(\times\\) denotes element-by-element multiplication. + +
+
+ +Intuitively, we would like to pair variables \\(u\_j\\) and \\(y\_i\\) so that \\(\lambda\_{ij}\\) is close to \\(1\\), because this means that the gain from \\(u\_j\\) to \\(y\_i\\) is unaffected by closing the other loops. +More precisely, we would like to pair such that the rearranged system, with the pairings along the diagonal, has a RGA matrix close to identity. + +
+ + +#### Factorization of Sensitivity Function {#factorization-of-sensitivity-function} + +The magnitude of the off-diagonal elements in \\(G\\) (the interactions) relative to its diagonal elements are given by the matrix + +\begin{equation} + E \triangleq (G - \tilde{G})\tilde{G}^{-1} +\end{equation} + +An important relationship for decentralized control is: + +\begin{equation} + \tcmbox{\underbrace{(I + G K)}\_{\text{overall}} = \underbrace{(I + E \tilde{T})}\_{\text{interactions}} \quad \underbrace{(I + \tilde{G} K)}\_{\text{individual loops}}} +\end{equation} + +or equivalently in terms of the sensitivity function: + +\begin{equation} + \tcmbox{S = \tilde{S} (I + E \tilde{T})^{-1}} +\end{equation} + +with + +\begin{align\*} + \tilde{S} &\triangleq (I + \tilde{G}K)^{-1} = \text{diag}\left\\{\frac{1}{1 + g\_{ii} k\_i}\right\\} \\\\\\ + \tilde{T} &= I - \tilde{S} +\end{align\*} + +which contain the sensitivity and complementary sensitivity functions for the individual loops. +Note that \\(\tilde{S}\\) is not equal to the matrix of diagonal elements of \\(S\\). + + +#### Stability of Decentralized Control Systems {#stability-of-decentralized-control-systems} + +Consider a \\(m \times m\\) plant with single-loop controllers. +There are \\(m!\\) alternative pairings possible. +Thus tools are needed for quickly evaluating alternative pairings. +In this section, we first derive **sufficient conditions for stability** which may be used to select promising pairings. +We then derive **necessary conditions for stability** which may be used to eliminate undesirable pairings. + + +##### Sufficient conditions for stability {#sufficient-conditions-for-stability} + +For decentralized diagonal control, it is desirable that the system can be tuned and operated one loop at a time. +Assume therefore that \\(G\\) is stable and each individual loop is stable by itself (\\(\tilde{S}\\) and \\(\tilde{T}\\) are stable). 
+Using the **spectral radius condition** on the factorized \\(S\\) in [eq:S_factorization](#eq:S_factorization), we have that the overall system is stable (\\(S\\) is stable) if + +\begin{equation} + \rho(E\tilde{T}(j\omega)) < 1, \forall\omega +\end{equation} + +**Sufficient conditions in terms of \\(E\\)**. +Assume \\(G\\) is stable and that the individual loops are stable (\\(\tilde{T}\\) is stable). +The least conservative approach is to use \\(\rho(E\tilde{T}) \leq \mu(E) \maxsv(\tilde{T})\\). +Then the entire system is closed-loop stable (\\(T\\) is stable) if + +\begin{equation} + \tcmbox{\maxsv(\tilde{T}) = \max\_i |\tilde{t}\_i| < 1 / \mu(E) \quad \forall\omega} +\end{equation} + +\\(\mu(E)\\) is called the **structured singular value interaction measure**, and is computed with respect to the diagonal structure of \\(\tilde{T}\\) where we may view \\(\tilde{T}\\) as the "design uncertainty". + +We usually would like to use integral action in the loops, that is we want \\(\tilde{T} \approx I\\) at low frequencies, i.e. \\(\maxsv(\tilde{T}) \approx 1\\). +Thus, we prefer pairings for which we have \\(\mu(E) < 1\\) at low frequencies where we have tight control. +This ensures a "generalized diagonal dominance".
+ +**Sufficient conditions in terms of RGA**. +Suppose the plant \\(G(s)\\) is stable. If the RGA-matrix \\(\Lambda(G) = I\ \forall\omega\\) (which can only arise for a triangular plant \\(G(s)\\)), then stability of each of the individual loops implies stability of the entire system. + +In most cases, it is sufficient for overall stability to require that \\(G(j\omega)\\) is close to triangular (or \\(\Lambda(G) \approx I\\)) at crossover frequencies. +This gives the "first pairing rule". + +
+
+ +To achieve stability with decentralized control, prefer pairings such that at frequencies \\(\omega\\) around crossover, the rearranged matrix \\(G(j\omega)\\) (with the paired elements along the diagonal) is close to triangular. +This is equivalent to requiring \\(\Lambda(G(j\omega)) \approx I\\), i.e. the RGA-number \\(\\|\Lambda(G(j\omega)) - I\\|\_\text{sum}\\) should be small. + +
+ + +##### Necessary steady-state conditions for stability {#necessary-steady-state-conditions-for-stability} + +A desirable property of a decentralized control system is that it has **integrity**, i.e. the closed loop system should remain stable as subsystem controllers are brought in and out of service. +Mathematically, the system possesses integrity if it remains stable when the controller \\(K\\) is replaced by \\(\mathbb{E}K\\) where \\(\mathbb{E} = \text{diag}\\{\epsilon\_i\\}, \ \epsilon\_i=0,1\\).
+ +An even stronger requirement is that the system remains stable as the gains in the various loops are reduced: \\(0 \le \epsilon\_i \le 1\\). + +
+
+ +The plant \\(G(s)\\) (corresponding to a given pairing with the paired elements along its diagonal) is **Decentralized Integral Controllability** (DIC) if there exists a stabilizing decentralized controller with **integral action in each loop** such that each individual loop may be detuned independently by a factor \\(\epsilon\_i\\) (\\(0 \le \epsilon\_i \le 1\\)) without introducing instability. + +
+ +**Steady-State RGA and DIC**. +Consider a stable square plant \\(G\\) and a diagonal controller \\(K\\) with integral action in all elements, and assume that the loop transfer function \\(GK\\) is strictly proper. +If a pairing of outputs and manipulated inputs corresponds to a **negative steady-state relative gain**, then the closed-loop system has at least one of the following properties: + +- The overall closed-loop system is unstable +- The loop with the negative relative gain is unstable by itself +- The closed-loop system is unstable if the loop with the negative relative gain is opened + +This can be summarized as follows: + +
+
+ +\begin{equation} +\begin{aligned} + &\text{A stable (reordered) plant } G(s)\\\\\\ + &\text{is DIC only if } \lambda\_{ii}(0) \ge 0 \text{ for all } i +\end{aligned} +\end{equation} + +
+ + +#### The RGA and RHP-zeros: Further reasons for not pairing on negative RGA elements {#the-rga-and-rhp-zeros-further-reasons-for-not-pairing-on-negative-rga-elements} + +With decentralized control, we usually design and implement the controller by tuning and closing one loop at a time in a sequential manner. +Assume that we pair on a negative steady-state RGA-element, \\(\lambda\_{ij}(0) < 0\\), assume that \\(\lambda\_{ij}(\infty)\\) is positive, and assume that the element \\(g\_{ij}\\) has no RHP-zero. +We have the following implications: + +- If we start by closing the loop involving input \\(u\_j\\) and output \\(y\_i\\), then we will get a RHP-zero in \\(G^{ij}(s)\\) which will limit the performance in the other outputs +- If we end by closing this loop, then we will get a RHP-zero in \\(\hat{g}\_{ij}(s)\\) which will limit the performance in output \\(y\_i\\) + +
+
+ +For a stable plant, avoid pairings that correspond to negative steady-state RGA-elements \\(\lambda\_{ij}(0) < 0\\) + +
+ +
+
+ +\begin{align\*} +G(0) &= \begin{bmatrix} + 10.2 & 5.6 & 1.4 \\\\\\ + 15.5 & -8.4 & -0.7 \\\\\\ + 18.1 & 0.4 & 1.8 +\end{bmatrix} \\\\\\ +\Lambda(0) &= \begin{bmatrix} + 0.96 & 1.45 & -1.41 \\\\\\ + 0.94 & -0.37 & 0.43 \\\\\\ + -0.90 & -0.07 & 1.98 +\end{bmatrix} +\end{align\*} + +For a \\(3 \times 3\\) plant there are 6 alternative pairings. +From the steady state RGA, we see that there is only one positive element in column 2, and only one positive element in row 3, and therefore there is only one possible pairing if we require DIC: +\\[ u\_1 \leftrightarrow y\_2,\ u\_2 \leftrightarrow y\_1,\ u\_3 \leftrightarrow y\_3 \\] + +
+ +
+
+ +\begin{align\*} +G(s) &= \frac{-s + 1}{(5 s + 1)^2} \begin{bmatrix} + 1 & 4 & -26 \\\\\\ + 6.2 & 1 & -26 \\\\\\ + 1 & 1 & 1 +\end{bmatrix}\\\\\\ +\Lambda(G) &= \begin{bmatrix} + 1 & 5 & -5 \\\\\\ + -5 & 1 & 5 \\\\\\ + 5 & -5 & 1 +\end{bmatrix} +\end{align\*} + +Only two of the six possible pairings give positive steady-state RGA-elements: the diagonal pairing on all \\(\lambda\_{ii} = 1\\) or the pairing on all \\(\lambda\_{ii} = 5\\). +Intuitively, one may expect pairing with \\(\lambda\_{ii} = 1\\) since it corresponds to pairing on RGA-elements equal to \\(1\\). +However, the RGA matrix is far from identity, and the RGA-number \\(\\| \Lambda - I \\|\_\text{sum} = 30\\) for both alternatives. +Thus neither of the two alternatives satisfies _Pairing Rule 1_, and decentralized control should not be used for this plant. + +
+ + +#### Performance of Decentralized Control Systems {#performance-of-decentralized-control-systems} + +To study performance, we use the following factorization + +\begin{equation} + S = (I + \tilde{S}(\Gamma - I)^{-1}) \tilde{S} \Gamma +\end{equation} + +where \\(\Gamma\\) is the **Performance Relative Gain Array** (PRGA) + +\begin{equation} + \tcmbox{\Gamma(s) \triangleq \tilde{G}(s) G^{-1}(s)} +\end{equation} + +which is a scaled inverse of the plant.
+ +At frequencies where feedback is effective (\\(\tilde{S} \approx 0\\)), \\(S \approx \tilde{S} \Gamma\\) which shows that \\(\Gamma\\) is important when evaluating performance with decentralized control.
+ +Note that the diagonal elements of the PRGA-matrix are equal to the diagonal elements of the RGA and that the off-diagonal elements of the PRGA depend on the relative scaling on the outputs which is not the case for the RGA.
+ +We will also use the related **Closed-Loop Disturbance Gain** (CLDG) matrix: + +\begin{equation} + \tcmbox{\tilde{G}\_d(s) \triangleq \Gamma(s)G\_d(s) = \tilde{G}(s) G^{-1}(s) G\_d(s)} +\end{equation} + +which depends on both output and disturbance scaling.
+ +Suppose the system has been scaled such that: + +- Each disturbance magnitude is less than \\(1\\), \\(|d\_k| < 1\\) +- Each reference change is less than the corresponding diagonal element in \\(R\\), \\(|r\_j| < R\_j\\) +- For each output the acceptable control error is less than \\(1\\), \\(|e\_i| < 1\\) + + +##### Single disturbance {#single-disturbance} + +Consider a single disturbance, in which case \\(G\_d\\) is a vector, and let \\(g\_{di}\\) denote the \\(i\text{'th}\\) element of \\(G\_d\\). +Let \\(L\_i = g\_{ii} k\_i\\) denote the loop transfer function in loop \\(i\\). +Consider frequencies where feedback is effective so \\(\tilde{S}\Gamma\\) is small. +Then for **acceptable disturbance rejection** (\\(|e\_i| < 1\\)) we must, with decentralized control, require for each loop \\(i\\) + +\begin{equation} + \tcmbox{|1 + L\_i| > |\tilde{g}\_{di}| \quad \forall i} +\end{equation} + +which is the same as the SISO-condition except that \\(G\_d\\) is replaced by the CLDG. +In words, \\(\tilde{g}\_{di}\\) gives the "apparent" disturbance gain as seen from the loop \\(i\\) when the system is controlled using decentralized control. + + +##### Single reference change {#single-reference-change} + +Consider a change in reference for output \\(j\\) of magnitude \\(R\_j\\). +Consider frequencies where feedback is effective. +Then for **acceptable reference tracking** (\\(|e\_i|<1\\)) we must require for each loop \\(i\\) + +\begin{equation} + \tcmbox{|1 + L\_i| > |\gamma\_{ij}| \cdot |R\_j| \quad \forall i} +\end{equation} + +which is the same as the SISO-condition except for the PRGA-factor \\(|\gamma\_{ij}|\\). + +Consequently, for performance it is desirable to have small elements in \\(\Gamma\\), at least at frequencies where feedback is effective. +However, at frequencies close to crossover, stability is the main issue and since the diagonal elements of the PRGA and RGA are equal, we usually prefer to have \\(\gamma\_{ii}\\) close to \\(1\\). 
+ + +#### Summary: Controllability Analysis for Decentralized Control {#summary-controllability-analysis-for-decentralized-control} + +When considering decentralized diagonal control of a plant, one should first check that the plant is controllable with any controller. +The next step is to compute the RGA matrix as a function of frequency, and to determine if one can find a good set of input-output pairs bearing in mind the following: + +1. Prefer pairings which have the **RGA-matrix close to identity at frequencies around crossover**, i.e. the RGA-number \\(\\|\Lambda(j\omega)-I\\|\\) should be small +2. Avoid a pairing \\(ij\\) with negative steady-state RGA elements \\(\lambda\_{ij}(G(0))\\) +3. Prefer a pairing \\(ij\\) where \\(g\_{ij}(s)\\) puts minimal restrictions on the achievable bandwidth. + Specifically, the frequency \\(\omega\_{uij}\\) where \\(\angle g\_{ij}(j\omega\_{uij}) = \SI{-180}{\degree}\\) should be as large as possible. + This rule favors pairings on variables "close to each other" + +When a reasonable choice of pairings has been made, one should rearrange \\(G\\) to have the **paired elements along the diagonal** and perform a **controllability analysis**: + +
    +
  1. Compute the CLDG and PRGA, and plot these as a function of frequency
  2. +
  3. For systems with many loops, it is best to perform the analysis one loop at a time, that is, for each loop \\(i\\), plot \\(|\tilde{g}\_{dik}|\\) for each disturbance \\(k\\) and plot \\(|\gamma\_{ij}|\\) for each reference \\(j\\). +For performance, we need \\(|1 + L\_i|\\) to be larger than each of these: + +\begin{equation} + |1 + L\_i| > \max\_{k,j}\\{|\tilde{g}\_{dik}|, |\gamma\_{ij}|\\} +\end{equation} + +To achieve stability of the individual loops, one must analyze \\(g\_{ii}(s)\\) to ensure that the bandwidth required by [eq:decent_contr_one_loop](#eq:decent_contr_one_loop) is achievable. +Note that RHP-zeros in the diagonal elements may limit achievable decentralized control, whereas they may not pose any problems for a multivariable controller. +Since with decentralized control, we usually want to use simple controllers, the achievable bandwidth in each loop will be limited by the frequency where \\(\angle g\_{ii}\\) is \\(\SI{-180}{\degree}\\)
  4. +
  5. Check for constraints by considering the elements of \\(G^{-1} G\_d\\) and make sure that they do not exceed one in magnitude within the frequency range where control is needed. +Equivalently, one may for each loop \\(i\\), plot \\(|g\_{ii}|\\) and the requirement is then that + +\begin{equation} + |g\_{ii}| > |\tilde{g}\_{dik}| \quad \forall k +\end{equation} + +at frequencies where \\(|\tilde{g}\_{dik}|\\) is larger than \\(1\\). +This provides a direct generalization of the requirement \\(|G| > |G\_d|\\) for SISO systems.
  6. +
+ +If the plant is not controllable, then one may consider another choice of pairing and go back to Step 4. +If one still cannot find any pairing which are controllable, then one should consider multivariable control. + +
    +
  1. If the chosen pairing is controllable, then [eq:decent_contr_one_loop](#eq:decent_contr_one_loop) tells us how large \\(|L\_i| = |g\_{ii} k\_i|\\) must be. +This can be used as a basis for designing the controller \\(k\_i(s)\\) for loop \\(i\\)
  2. +
+ + +#### Sequential Design of Decentralized Controllers {#sequential-design-of-decentralized-controllers} + +Usually the local controllers \\(k\_i(s)\\) are designed locally and then all the loops are closed. +One problem with this is that the **interactions** may cause the overall system \\(T\\) to be unstable, even though the local loops \\(\tilde{T}\\) are stable. +This will not happen if the plant is **diagonally dominant**, such that we satisfy, for example \\(\maxsv(\tilde{T}) < 1/\mu(E)\\).
+ +The stability problem is avoided if the controllers are **designed sequentially** when, for example, the bandwidths of the loops are quite different. +In this case, the outer loops are tuned with the inner loops in place, and each step may be considered as a SISO control problem. +In particular, overall stability is determined by \\(m\\) SISO stability conditions. +However, the issue of performance is more complicated because the closing of a loop may cause "disturbances" (interactions) into a previously designed loop. +The engineer must then go back and redesign a loop that has been designed earlier. +Thus sequential design may involve many iterations. + + +#### Conclusion on Decentralized Control {#conclusion-on-decentralized-control} + +A number of **conditions for the stability**, e.g. [eq:decent_contr_cond_stability](#eq:decent_contr_cond_stability) and [eq:decent_contr_necessary_cond_stability](#eq:decent_contr_necessary_cond_stability), and **performance**, e.g. [eq:decent_contr_cond_perf_dist](#eq:decent_contr_cond_perf_dist) and [eq:decent_contr_cond_perf_ref](#eq:decent_contr_cond_perf_ref), of decentralized control systems have been derived. + +The conditions may be useful in **determining appropriate pairings of inputs and outputs** and the **sequence in which the decentralized controllers should be designed**. + +The conditions are also useful in an **input-output controllability analysis** for determining the viability of decentralized control. + + +## Model Reduction {#model-reduction} + + + + +### Introduction {#introduction} + +Modern controller design methods such as \\(\mathcal{H}\_\infty\\) and LQG, produce controllers of order at least equal to that of the plant, and usually higher because of the inclusion of weights. +These control laws may be **too complex** with regards to **practical implementation** and simpler designs are then sought. 
+For this purpose, one can **either reduce the order of the plant model prior to controller design, or reduce the controller in the final stage**.
+ +
+
+ +Given a high-order linear time-invariant stable model \\(G\\), find a low-order approximation \\(G\_a\\) such that the infinity (\\(\mathcal{H}\_\infty\\) or \\(\mathcal{L}\_\infty\\)) norm of the difference \\(\\|G - G\_a\\|\_\infty\\) is small. + +
+ +By model order, we mean the dimension of the state vector in a minimal realization. +This is sometimes called the **McMillan degree**.
+ +So far we have only been interested in the infinity (\\(\mathcal{H}\_\infty\\)) norm of stable systems. +But the error \\(G-G\_a\\) may be unstable and **the definition of the infinity norm needs to be extended to unstable systems**. + +
+
+ +\\(\mathcal{L}\_\infty\\) defines the set of rational functions which have no poles on the imaginary axis, it includes \\(\mathcal{H}\_\infty\\), and its norm (like \\(\mathcal{H}\_\infty\\)) is given by + +\begin{equation} + \\|G\\|\_\infty = \sup\_\omega \maxsv(G(j\omega)) +\end{equation} + +
+ +We will describe three main methods for this problem: + +- **Balanced truncation** +- **Balanced residualization** +- **Optimal Hankel norm approximation** + +Each method gives a **stable approximation** and a **guaranteed bound on the error in the approximation**. +We will further show how the methods can be employed to reduce the order of an **unstable** model \\(G\\). + +All these methods start from a special state-space realization of \\(G\\) referred to as **balanced**. +We will describe this realization, but first we will show how the techniques of truncation and residualization can be used to remove the high frequency or fast modes of a state-space realization. + + +### Truncation and Residualization {#truncation-and-residualization} + +Let \\((A,B,C,D)\\) be a minimal realization of a stable system \\(G(s)\\), and partition the state vector \\(x\\), of dimension \\(n\\), into \\(\colvec{x\_1 \\ x\_2}\\) where \\(x\_2\\) is the vector of \\(n-k\\) states we wish to remove. +With appropriate partitioning of \\(A\\), \\(B\\) and \\(C\\), the state space equations become + +\begin{equation} + \begin{aligned} + \dot{x}\_1 &= A\_{11} x\_1 + A\_{12} x\_2 + B\_1 u \\\\\\ + \dot{x}\_2 &= A\_{21} x\_1 + A\_{22} x\_2 + B\_2 u \\\\\\ + y &= C\_1 x\_1 + C\_2 x\_2 + D u + \end{aligned} +\end{equation} + + +#### Truncation {#truncation} + +A k-th order truncation of the realization \\(G \triangleq (A, B, C, D)\\) is given by \\(G\_a \triangleq (A\_{11}, B\_1, C\_1, D)\\). +The truncated model \\(G\_a\\) is equal to \\(G\\) at infinite frequency \\(G(\infty) = G\_a(\infty) = D\\), but apart from this, we cannot say anything for the general case about the relationship between \\(G\\) and \\(G\_a\\).
+ +If however, \\(A\\) is in **Jordan form**, then it is easy to **order the states** so that \\(x\_2\\) corresponds to **high frequency or fast modes**. + + +##### Modal Truncation {#modal-truncation} + +For simplicity, assume that \\(A\\) has been diagonalized so that + +\begin{align\*} + A &= \begin{bmatrix} + \lambda\_1 & 0 & \dots & 0 \\\\\\ + 0 & \lambda\_2 & \dots & 0 \\\\\\ + \vdots & \vdots & \ddots & \vdots \\\\\\ + 0 & 0 & \dots & \lambda\_n \\\\\\ + \end{bmatrix},\quad B = \begin{bmatrix} + b\_1^T \\ b\_2^T \\ \vdots \\ b\_n^T + \end{bmatrix} \\\\\\ + C &= \begin{bmatrix} + c\_1, c\_2, \dots, c\_n + \end{bmatrix} +\end{align\*} + +Then, if the \\(\lambda\_i\\) are ordered so that \\(|\lambda\_1| < |\lambda\_2| < \dots\\), the fastest modes are removed from the model after truncation. +The difference between \\(G\\) and \\(G\_a\\) following a k-th order model truncation is given by +\\[ G - G\_a = \sum\_{i = k+1}^n \frac{c\_i b\_i^T}{s - \lambda\_i} \\] +and therefore + +\begin{equation} + \\| G - G\_a \\|\_\infty \le \sum\_{i = k+1}^n \frac{\maxsv(c\_i b\_i^t)}{|\text{Re}(\lambda\_i)|} +\end{equation} + +It is interesting to note that the error depends on the residues \\(c\_i b\_i^T\\) as well as the \\(\lambda\_i\\). +The distance of \\(\lambda\_i\\) from the imaginary axis is therefore not a reliable indicator of whether the associated mode should be included in the reduced order model or not.
+ +An advantage of modal truncation is that the poles of the truncated model are a subset of the poles of the original model and therefore **retain any physical interpretation** they might have. + + +#### Residualization {#residualization} + +In truncation, we discard all the states and dynamics associated with \\(x\_2\\). +Suppose that instead of this, we simply set \\(\dot{x}\_2 = 0\\), i.e. we residualize \\(x\_2\\), in the state-space equations. +One can then solve for \\(x\_2\\) in terms of \\(x\_1\\) and \\(u\\), and back substitution of \\(x\_2\\), then gives + +\begin{align\*} + \dot{x}\_1 &= (A\_{11} - A\_{12} A\_{22}^{-1} A\_{21}) x\_1 + (B\_1 - A\_{12} A\_{22}^{-1} B\_2) u \\\\\\ + y &= (C\_1 - C\_2 A\_{22}^{-1} A\_{21}) x\_1 + (D - C\_2 A\_{22}^{-1} B\_2) u +\end{align\*} + +And let assume \\(A\_{22}\\) is invertible and define + +\begin{alignat\*}{3} + &A\_r \triangleq A\_{11} - A\_{12}A\_{22}^{-1}A\_{21} & & \quad B\_r \triangleq B\_1 - A\_{12}A\_{22}^{-1}B\_2\\\\\\ + &C\_r \triangleq C\_1 - C\_2A\_{22}^{-1}A\_{21} & & \quad D\_r \triangleq D - C\_2A\_{22}^{-1}B\_2 +\end{alignat\*} + +The reduced order model \\(G\_a(s) = (A\_r, B\_r, C\_r, D\_r)\\) is called a **residualization** of \\(G(s) = (A, B, C, D)\\). +Usually \\((A, B, C, D)\\) will have been put into **Jordan form**, with the eigenvalues ordered so that \\(x\_2\\) contains the fast modes.
+ +Model reduction by residualization is then equivalent to singular perturbation approximation, where **the derivatives of the fastest states are allowed to approach zero** with some parameter \\(\epsilon\\).
+ +An important property of residualization is that **it preserves the steady-state gain of the system**: + +\begin{equation} + \tcmbox{G\_a(0) = G(0)} +\end{equation} + +This should be no surprise since the residualization process sets derivatives to zero, which are zero anyway at steady-state. +But it is in stark contrast to truncation which retains the system behavior at infinite frequency. +This contrast between truncation and residualization follows from the simple bilinear relationship \\(s \to \frac{1}{s}\\) which relates the two.
+ +It is clear that **truncation is to be preferred when accuracy is required at high frequencies**, whereas **residualization is better for low frequency modelling**.
+ +Both methods depend to a large extent on the original realization and we have suggested to use of the Jordan form. +A better realization, with many useful properties, is the **balanced realization**. + + +### Balanced Realization {#balanced-realization} + +A balanced realization is an asymptotically stable minimal realization in which the **controllability and observability Gramiams are equal and diagonal**.
+ +Let \\((A,B,C,D)\\) be a minimal realization of a stable, rational transfer function \\(G(s)\\), then \\((A,B,C,D)\\) is called **balanced** if the solutions to the following Lyapunov equations + +\begin{subequations} + \begin{align} + AP + PA^T + BB^T &= 0 \\\\\\ + A^TQ + QA + C^TC &= 0 + \end{align} +\end{subequations} + +are \\(P = Q = \text{diag}(\sigma\_1, \sigma\_2, \dots, \sigma\_n) \triangleq \Sigma\\), where \\(\sigma\_1 \ge \sigma\_2 \ge \dots \ge \sigma\_n > 0\\). +\\(P\\) and \\(Q\\) are the **controllability and observability Gramians**, also defined by + +\begin{subequations} + \begin{align} + P &\triangleq \int\_0^\infty e^{At} B B^T e^{A^Tt} dt \\\\\\ + Q &\triangleq \int\_0^\infty e^{A^Tt} C^T C e^{At} dt + \end{align} +\end{subequations} + +\\(\Sigma\\) is therefore simply referred to as the Gramian of \\(G(s)\\). +The \\(\sigma\_i\\) are the **ordered Hankel singular values** of \\(G(s)\\), more generally defined as \\(\sigma\_i \triangleq \lambda\_i^{\frac{1}{2}}(PQ)\\), \\(i = 1, \dots, n\\). +Notice that \\(\sigma\_1 = \\|G\\|\_H\\) is the Hankel norm of \\(G(s)\\).
+ +In balanced realization the value of each \\(\sigma\_i\\) is associated with a state \\(x\_i\\) of the balanced system. + +
+
+ +The size of \\(\sigma\_i\\) is a relative measure of the contribution that \\(x\_i\\) makes to the input-output behavior of the system. + +
+ +Therefore if \\(\sigma\_1 \gg \sigma\_2\\), then the state \\(x\_1\\) affects the input-output behavior much more than \\(x\_2\\), or indeed any other state because of the ordering of the \\(\sigma\_i\\).
+ +After balancing a system, each state is just as controllable as it is observable, and a measure of a state's joint observability and controllability is given by its associated Hankel singular value. +This property is fundamental to the model reduction methods in the remainder of this chapter which work by removing states having little effect on the system's input-output behavior. + + +### Balanced Truncation and Balanced Residualization {#balanced-truncation-and-balanced-residualization} + +Let the balanced realization \\((A,B,C,D)\\) of \\(G(s)\\) and the corresponding \\(\Sigma\\) be partitioned compatibly as + +\begin{equation} + \begin{aligned} + A &= \begin{bmatrix} + A\_{11} & A\_{12} \\\\\\ + A\_{21} & A\_{22} + \end{bmatrix}, \quad B = \begin{bmatrix} + B\_1 \\ B\_2 + \end{bmatrix} \\\\\\ + C &= \begin{bmatrix} + C\_1 & C\_2 + \end{bmatrix}, \quad \Sigma = \begin{bmatrix} + \Sigma\_1 & 0 \\\\\\ + 0 & \Sigma\_2 + \end{bmatrix} + \end{aligned} +\end{equation} + +where + +\begin{align\*} + \Sigma\_1 &= \text{diag}(\sigma\_1, \sigma\_2, \dots, \sigma\_k)\\\\\\ + \Sigma\_2 &= \text{diag}(\sigma\_{k+1}, \sigma\_{k+2}, \dots, \sigma\_n),\ \sigma\_k > \sigma\_{k+1} +\end{align\*} + + +##### Balanced Truncation {#balanced-truncation} + +The reduced order model given by \\((A\_{11},B\_1,C\_1,D)\\) is called a **balanced truncation** of the full order system \\(G(s)\\). +The idea of balancing truncation is thus to first make a balanced realization of the system and then to discard the states corresponding to small Hankel singular values. + +A balanced truncation is also a balanced realization, and the infinity norm of the error between \\(G(s)\\) and the reduced order system \\(G\_a(s)\\) is bounded by twice the sum of the last \\(n-k\\) Hankel singular values, i.e. 
twice the trace of \\(\Sigma\_2\\): + +\begin{equation} + \\|G(s) - G\_a(s)\\|\_\infty \le 2 \cdot \text{Tr}\big( \Sigma\_2 \big) +\end{equation} + +For the case of repeated Hankel singular values, each repeated Hankel singular value is to be counted only once in calculating the sum. + +Useful algorithms that compute balanced truncations without first computing a balanced realization still require the computation of the observability and controllability Gramiam, which can be a problem if the system to be reduced is of very high order. + + +##### Balanced Residualization {#balanced-residualization} + +In balanced truncation above, we discarded the least controllable and observable states corresponding to \\(\Sigma\_2\\). +In balanced residualization, we simply set to zero the derivatives of all these states. + + +##### Theorem {#theorem} + +Let \\(G(s)\\) be a stable rational transfer function with Hankel singular values \\(\sigma\_1 > \sigma\_2 > \dots > \sigma\_N\\) where each \\(\sigma\_i\\) has multiplicity \\(r\_i\\) and let \\(G\_a^k(s)\\) be obtained by truncating or residualizing the balanced realization of \\(G(s)\\) to the first \\((r\_1 + r\_2 + \dots + r\_k)\\) states. +Then + +\begin{equation} + \\|G(s) - G\_a^k(s)\\|\_\infty \le 2(\sigma\_{k+1} + \sigma\_{k+2} + \dots + \sigma\_N) +\end{equation} + + +### Optimal Hankel Norm Approximation {#optimal-hankel-norm-approximation} + +In this approach to model reduction, the problem that is directly addressed is the following: given a stable model \\(G(s)\\) of order \\(n\\), find a reduced order model \\(G\_h^k(s)\\) of degree \\(k\\) such that the Hankel norm of the approximation error, \\(\\| G(s) - G\_h^k(s) \\|\_H\\), is minimized. + +
+
+ +The Hankel norm of any stable transfer function \\(E(s)\\) is defined as + +\begin{equation} + \\| E(s) \\|\_H \triangleq \rho^{\frac{1}{2}} (PQ) +\end{equation} + +where \\(P\\) and \\(Q\\) are the controllability and observability Gramiams of \\(E(s)\\). + +
+ +So in the optimization we seek an error which is in some sense closest to being completely unobservable and completely uncontrollable. + +The infinity norm bound on the approximate error for the optimal Hankel norm approximation is better than for balanced truncation and residualization. This is shown with the following theorem. + + +##### Theorem {#theorem} + +Let \\(G(s)\\) be a stable, square, transfer function \\(G(s)\\) with Hankel singular values \\(\sigma\_1 \ge \sigma\_2 \ge \dots \ge \sigma\_k \ge \sigma\_{k+1} = \sigma\_{k+2} = \dots = \sigma\_{k+l} > \sigma\_{k+l+1} \ge \dots \ge \sigma\_n > 0\\). +An optimal Hankel norm approximation of order \\(k\\), \\(G\_h^k(s)\\), can be constructed as follows. + +Let \\((A,B,C,D)\\) be a balanced realization of \\(G(s)\\) with the Hankel singular values reordered so that the Gramiam matrix is + +\begin{align\*} + \Sigma &= \text{diag}(\sigma\_1,\dots,\sigma\_k,\sigma\_{k+l+1},\dots,\sigma\_n,\sigma\_{k+1},\dots,\sigma\_{k+l})\\\\\\ + &\triangleq \text{diag}(\Sigma\_l, \sigma\_{k+1}I) +\end{align\*} + +Partition \\((A,B,C,D)\\) to conform with \\(\Sigma\\) +\\[ A = \begin{bmatrix} A\_{11} & A\_{12} \\ A\_{21} & A\_{22} \end{bmatrix},\ B = \begin{bmatrix} B\_1 \\ B\_2 \end{bmatrix},\ C = \begin{bmatrix} C\_1 & C\_2 \end{bmatrix} \\] +Define \\((\hat{A},\hat{B},\hat{C},\hat{D})\\) by + +\begin{subequations} +\begin{align} + \hat{A} &\triangleq \Gamma^{-1} \left( \sigma\_{k+1}^2 A\_{11}^T + \sigma\_1 A\_{11} \Sigma\_1 - \sigma\_{k+1} C\_{1}^T U B\_{1}^T \right) \\\\\\ + \hat{B} &\triangleq \Gamma^{-1} \left( \sigma\_1 B\_1 + \sigma\_{k+1} C\_1^T U \right) \\\\\\ + \hat{C} &\triangleq C\_1 \Sigma\_1 + \sigma\_{k+1} U B\_1^T \\\\\\ + \hat{D} &\triangleq D - \sigma\_{k+1} U +\end{align} +\end{subequations} + +where \\(U\\) is a unitary matrix satisfying +\\[ B\_2 = - C\_2^T U \ \text{ and } \ \Gamma \triangleq \Sigma\_1^2 - \sigma\_{k+1}^2 I \\] + +The matrix \\(\hat{A}\\) has \\(k\\) "stable" eigenvalues; the 
remaining ones are in the open right-half plane. +Then +\\[ G\_h^k(s) + F(s) = \left[ \begin{array}{c|cc} + \hat{A} & \hat{B} \\ \hline + \hat{C} & \hat{D} +\end{array} \right] \\] +where \\(G\_h^k(s)\\) is a stable optimal Hankel norm approximation of order \\(k\\), and \\(F(s)\\) is an anti-stable (all poles in the open right-half plane) transfer function of order \\(n-k-l\\). +The Hankel norm of the error between \\(G\\) and the optimal approximation \\(G\_h^k\\) is equal to the \\((k+1)\text{'th}\\) Hankel singular value of \\(G\\): + +\begin{equation} + \tcmbox{\\| G - G\_h^k \\|\_H = \sigma\_{k+1}(G)} +\end{equation} + + +### Model Reduction - Practical Summary {#model-reduction-practical-summary} + + +#### Reduction of model {#reduction-of-model} + +Three reduction techniques have been discussed here: balanced residualization, balanced truncation and optimal Hankel norm approximation. + +It is sometimes desirable to have the steady-state gain of the reduced plant model the same as the full order model. +For instance, this is the case if we want to use feedforward control. +The truncated and optimal Hankel norm approximated systems do not preserve the steady-state gain and they have to be **scaled**, i.e. the model approximation \\(G\_a\\) is replaced by \\(G\_a W\_s\\) where \\(W\_s = G\_a(0)^{-1} G(0)\\), \\(G(s)\\) being the full order model. + +However, this scaling generally introduces **large model errors at other frequencies**. + 
+
+ +Hence **residualization** is to be preferred whenever low frequency matching is desired. + +
+ + +#### Reduction of a 2 degrees-of-freedom controller {#reduction-of-a-2-degrees-of-freedom-controller} + +Let's consider a 2 degrees-of-freedom controller \\(K = [K\_1\ K\_2]\\). +In order ensure perfect steady-state tracking, i.e. to match \\(T\_{\text{ref}}\\) at steady-state, a prefilter \\(W\_i\\) is added to scale the controller: \\(K = [K\_1 W\_i\ K\_2]\\). + +There are two approaches for order reduction: + +1. the scaled controller \\([K\_1 W\_i\ K\_2]\\) is reduced. + A balanced residualization of the controller preserves the controller's steady state gain and would not need to be scaled again. + Reductions via truncation and optimal Hankel norm approximation techniques, however, lose the steady-state gain and reduced controllers would need to be re-scaled to match \\(T\_{\text{ref}}(0)\\) +2. the full order controller \\([K\_1\ K\_2]\\) is reduced without first scaling the prefilter. + In which case, scaling is done after reduction. + A larger scaling is generally required for the truncated and optimal Hankel norm approximated controllers and this gives poorer model matching at other frequencies. + +In both cases, the balanced residualization is preferred. + + +### Reduction of Unstable Models {#reduction-of-unstable-models} + +Balanced truncation, balanced residualization and optimal Hankel norm approximation only apply to stable models. +In this section we briefly present two approaches for reducing the order of an unstable model. + + +#### Stable Part Model Reduction {#stable-part-model-reduction} + +The unstable model can be first decomposed into its stable and anti-stable parts: + +\begin{equation} + G(s) = G\_u(s) + G\_s(s) +\end{equation} + +where \\(G\_u(s)\\) has all its poles in the closed right-half plane and \\(G\_s(s)\\) has all its poles in the open left-half plane. 
+Balanced truncation, balanced residualization or optimal Hankel norm approximation can then be applied to the stable part \\(G\_s(s)\\) to find a reduced order approximation \\(G\_{sa}(s)\\). +This is then added to the anti-stable part to give + +\begin{equation} + G\_a(s) = G\_u(s) + G\_{sa}(s) +\end{equation} + +as an approximation to the full order model \\(G(s)\\). + + +#### Coprime Factor Model Reduction {#coprime-factor-model-reduction} + +The coprime factors of a transfer function \\(G(s)\\) are stable, and therefore we could reduce the order of these factors using balanced truncation, balanced residualization or optimal Hankel norm approximation: + +- Let \\(G(s) = M^{-1}(s) N(s)\\), where \\(M(s)\\) and \\(N(s)\\) are stable left-coprime factors of \\(G(s)\\) +- Approximate \\([N\ M]\\) of degree \\(n\\) by \\([N\_a \ M\_a]\\) of degree \\(k < n\\). +Then \\((N\_a, M\_a)\\) is a normalized left-coprime factorization of \\(G\_a = M\_a^{-1} N\_a\\), and \\([N\_a,\ M\_a]\\) has Hankel singular values \\(\sigma\_1, \sigma\_2, \dots, \sigma\_k\\). + + +### Conclusion {#conclusion} + +We have presented and compared three main methods for model reduction based on balanced realizations: **balanced truncation**, **balanced residualization** and **optimal Hankel norm approximation**.
+ +Residualization, unlike truncation and optimal Hankel norm approximation, preserves the steady-state gain of the system, and like truncation, it is simple and computationally inexpensive. +It is observed that truncation and optimal Hankel norm approximation perform better at high frequencies, where residualization performs better at low and medium frequencies, i.e. up to the critical frequencies.
+ +Thus **for plant model reduction**, where models are not accurate at high frequencies to start with, **residualization would seem to be a better option**. +Further, if the steady state gains are to be kept unchanged, truncated and optimal Hankel norm approximated systems require scaling, which may result in large errors. +In such a case, too, residualization would be preferred choice.
+ +For **controller reduction**, we have shown in a two degrees-of-freedom example, the importance of scaling and steady-state gain matching. + +In general, steady-state gain matching may not be crucial, but the matching should usually be good near the desired closed-loop bandwidth. +Balanced residualization has been seen to perform close to the full order system in this frequency range. +Good approximation at high frequencies may also sometimes be desired. +In such a case, using truncation or optimal Hankel norm approximation with appropriate frequency weightings may yield better results. + +# Bibliography +Skogestad, S., & Postlethwaite, I., *Multivariable feedback control: analysis and design* (2007), : John Wiley. [↩](#ad6f62e369b7a8d31c21671886adec1f) diff --git a/content/book/taghirad13_paral.md b/content/book/taghirad13_paral.md new file mode 100644 index 0000000..abe62ff --- /dev/null +++ b/content/book/taghirad13_paral.md @@ -0,0 +1,2916 @@ ++++ +title = "Parallel robots : mechanics and control" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Reference Books]({{< relref "reference_books" >}}) + +Reference +: (Taghirad, 2013) + +Author(s) +: Taghirad, H. + +Year +: 2013 + + +## Introduction {#introduction} + + + +This book is intended to give some analysis and design tools for the increase number of engineers and researchers who are interested in the design and implementation of parallel robots. +A systematic approach is presented to analyze the kinematics, dynamics and control of parallel robots. +To define the motion characteristics of such robots, it is necessary to represent 3D motion of the robot moving platform with respect to a fixed coordinate. +This issue leads to the requirements for 3D representation of position, orientation and motion of bodies in space. 
+In chapter [sec:motion_representation](#sec:motion_representation), such representations are introduced with emphasis on screw coordinates, which makes the representation of the general motion of the robot much easier to follow.
+ +Kinematic analysis refers to the study of robot motion geometry without considering the forces and torques that cause the motion. +In this analysis (chapter [sec:kinematics](#sec:kinematics)), the relation between the geometrical parameters of the manipulator and the final motion of the moving platform is derived and analyzed.
+ +In Chapter [sec:jacobian](#sec:jacobian), the kinematics analysis of robot manipulators is further examined beyond static positioning. +Jacobian analysis not only reveals the relation between the joint variable velocities of a parallel manipulator and the moving platform linear and angular velocities, but also constructs the transformation needed to find the actuator forces from the forces and moments acting on the moving platform. +A systematic means to perform Jacobian analysis of parallel manipulators is given in this chapter.
+ +Dynamic analysis of parallel manipulators presents an inherent complexity due to their closed-loop structure and kinematic constraints. +Nevertheless, dynamic modeling is quite important for the control, in particular because parallel manipulators are preferred in applications where precise positioning and suitable dynamic performance under high loads are the prime requirements. +In Chapter [sec:dynamics](#sec:dynamics), the dynamic analysis of such robots is examined through three methods, namely the Newton-Euler principle of virtual work and Lagrange formations. +Furthermore, a method is presented in this chapter to formulate the dynamic equation of parallel robots into closed form, by which the dynamic matrices are more tractable, and dynamics verification becomes feasible.
+ +The control of parallel robots is elaborated in the last two chapters, in which both the motion and force control are covered. + + +## Motion Representation {#motion-representation} + + + + +### Spatial Motion Representation {#spatial-motion-representation} + +Six independent parameters are sufficient to fully describe the spatial location of a rigid body. + +Consider a rigid body in a spatial motion as represented in Figure [fig:rigid_body_motion](#fig:rigid_body_motion). +Let us define: + +- A **fixed reference coordinate system** \\((x, y, z)\\) denoted by frame \\(\\{\bm{A}\\}\\) whose origin is located at point \\(O\_A\\) +- A **moving coordinate system** \\((u, v, z)\\) denoted by frame \\(\\{\bm{B}\\}\\) attached to the rigid body at point \\(O\_B\\) + +The absolute position of point \\(P\\) of the rigid body can be constructed from the relative position of that point with respect to the moving frame \\(\\{\bm{B}\\}\\), and the **position and orientation** of the moving frame \\(\\{\bm{B}\\}\\) with respect to the fixed frame \\(\\{\bm{A}\\}\\). + + + +{{< figure src="/ox-hugo/taghirad13_rigid_body_motion.png" caption="Figure 1: Representation of a rigid body spatial motion" >}} + + +#### Position of a point {#position-of-a-point} + +The position of a point \\(P\\) with respect to a frame \\(\\{\bm{A}\\}\\) can be described by a \\(3 \times 1\\) position vector. +The name of the frame is usually added as a leading superscript: \\({}^A\bm{P}\\) which reads as vector \\(\bm{P}\\) in frame \\(\\{\bm{A}\\}\\). + +\begin{equation} + \tcmbox{{}^A\bm{P} = \begin{bmatrix} P\_x\\ P\_y\\ P\_z \end{bmatrix}} +\end{equation} + + +#### Orientation of a Rigid Body {#orientation-of-a-rigid-body} + +The orientation of the whole rigid body is the same for all its points (by definition). +Hence, representation of the orientation of a rigid body can be viewed as that for the orientation of a moving frame attached to the rigid body. 
+It can be **represented in several different ways**: the rotation matrix, the screw axis representation and Euler angles are common descriptions. + + +##### Rotation Matrix {#rotation-matrix} + +We consider a rigid body that has been exposed to a pure rotation. +Its orientation has changed from a state represented by frame \\(\\{\bm{A}\\}\\) to its current orientation represented by frame \\(\\{\bm{B}\\}\\) (Figure [2](#org42e4742)). + +A \\(3 \times 3\\) rotation matrix \\({}^A\bm{R}\_B\\) is defined by + +\begin{equation} + \tcmbox{{}^A\bm{R}\_B = \left[ {}^A\hat{\bm{x}}\_B | {}^A\hat{\bm{y}}\_B | {}^A\hat{\bm{z}}\_B \right] = \begin{bmatrix} + u\_{x} & v\_{x} & w\_{x} \\\\\\ + u\_{y} & v\_{y} & w\_{y} \\\\\\ + u\_{z} & v\_{z} & w\_{z} + \end{bmatrix}} +\end{equation} + +in which \\({}^A\hat{\bm{x}}\_B, {}^A\hat{\bm{y}}\_B\\) and \\({}^A\hat{\bm{z}}\_B\\) are the Cartesian unit vectors of frame \\(\\{\bm{B}\\}\\) represented in frame \\(\\{\bm{A}\\}\\). + +\begin{align\*} + {}^A\hat{\bm{x}}\_B &= {}^A\hat{u} = u\_x \hat{i} + u\_y \hat{j} + u\_z \hat{k} \\\\\\ + {}^A\hat{\bm{y}}\_B &= {}^A\hat{v} = v\_x \hat{i} + v\_y \hat{j} + v\_z \hat{k} \\\\\\ + {}^A\hat{\bm{z}}\_B &= {}^A\hat{w} = w\_x \hat{i} + w\_y \hat{j} + w\_z \hat{k} +\end{align\*} + +The nine elements of the rotation matrix can be simply represented as the projections of the Cartesian unit vectors of frame \\(\\{\bm{B}\\}\\) on the unit vectors of frame \\(\\{\bm{A}\\}\\). 
+ + + +{{< figure src="/ox-hugo/taghirad13_rotation_matrix.png" caption="Figure 2: Pure rotation of a rigid body" >}} + +The rotation matrix has a number of properties linking each of its nine elements: + +- **Orthonormality**: the rotation matrix is an orthonormal matrix +- **Transposition**: \\({}^B\bm{R}\_A = {}^A\bm{R}\_B^{T}\\) +- **Inverse**: \\({}^B\bm{R}\_A = {}^A\bm{R}\_B^{-1} = {}^A\bm{R}\_B^{T}\\) +- **Pure Rotation Mapping**: Suppose that the point of a rigid body with respect to the moving frame \\(\\{\bm{B}\\}\\) is given and denoted by \\({}^B\bm{P}\\) and we wish to express the position of this point with respect to the fixed frame \\(\\{\bm{A}\\}\\). Consider that the rigid body has been exposed to a pure rotation (\\(\\{\bm{A}\\}\\) and \\(\\{\bm{B}\\}\\) are coincident at their origins). Then + \\[ \tcmbox{{}^A\bm{P} = {}^A\bm{R}\_B {}^B\bm{P}} \\] +- **Determinant**: \\(\det({}^A\bm{R}\_B) = 1\\) +- **Eigenvalues**: The eigenvalues of a rotation matrix \\({}^A\bm{R}\_B\\) are equal to \\(1\\), \\(e^{i\theta}\\) and \\(e^{-i\theta}\\) where \\(\theta\\) is calculated from \\(\theta = \cos^{-1}\frac{\text{tr}({}^A\bm{R}\_B) - 1}{2}\\). + + +##### Screw Axis Representation {#screw-axis-representation} + +As seen previously, there exist an **invariant angle** \\(\theta\\) corresponding to the rotation matrix. This angle is an equivalent angle of rotation. +The rotation is a spatial change of orientation about an axis which is called the **screw axis**. +It can be shown that this screw axis is also an invariant of the rotation matrix, it is the eigenvector corresponding to the eigenvalue \\(\lambda = 1\\). + +The term screw axis for this axis of rotation has the benefit that a general motion of a rigid body, which is composed as a pure translation and a pure rotation, can be further represented by the same axis of rotation. + +The screw axis representation has the benefit of **using only four parameters** to describe a pure rotation. 
+These parameters are the angle of rotation \\(\theta\\) and the axis of rotation which is a unit vector \\({}^A\hat{\bm{s}} = [s\_x, s\_y, s\_z]^T\\).
+
+
+
+{{< figure src="/ox-hugo/taghirad13_screw_axis_representation.png" caption="Figure 3: Pure rotation about a screw axis" >}}
+
+Rodrigues' rotation formula for spatial rotation of a rigid body gives us the new position \\(\bm{P}\_2\\) of point \\(\bm{P}\_1\\) after a rotation represented by the screw axis \\(\hat{\bm{s}}\\) and the angle \\(\theta\\):
+
+\begin{equation}
+  \tcmbox{\bm{P}\_2 = \bm{P}\_1 \cos \theta + (\hat{\bm{s}} \times \bm{P}\_1)\sin\theta + (\bm{P}\_1 \cdot \hat{\bm{s}})\hat{\bm{s}} (1 - \cos\theta)}
+\end{equation}
+
+
+##### Euler Angles {#euler-angles}
+
+Since rotation in space is a motion with three-degrees-of-freedom, a set of three independent parameters is sufficient to represent the orientation.
+
+In an Euler angle representation, three **successive** rotations about the coordinate system of either **fixed** or **moving** frame are used to describe the orientation of the rigid body.
+
+One type of Euler angle corresponds to rotations considered with respect to the fixed frame. The representation is called pitch-roll-yaw, or fixed X-Y-Z Euler angles.
+
+Three other types of Euler angles are considered with respect to a moving frame: they are denoted \\(w-v-u\\), \\(w-v-w\\) and \\(w-u-w\\) Euler angles.
+
+
+#### Pitch-Roll-Yaw Euler Angles {#pitch-roll-yaw-euler-angles}
+
+The pitch, roll and yaw angles are defined for a moving object in space as the rotations along the lateral, longitudinal and vertical axes attached to the moving object.
+
+
+
+{{< figure src="/ox-hugo/taghirad13_pitch-roll-yaw.png" caption="Figure 4: Definition of pitch, roll and yaw angles on an airplane" >}}
+
+Since all three rotations take place about the axes of a **fixed coordinate frame**, the resulting rotation matrix is obtained by multiplying the three basic rotation matrices as follows:
+\\[ \tcmbox{\bm{R}\_{PRY}(\alpha, \beta, \gamma) = \bm{R}\_z(\gamma) \bm{R}\_y(\beta) \bm{R}\_x(\alpha)} \\]
+
+To go from rotation matrix to Pitch-Roll-Yaw angles, the following set of equations can be used:
+
+\begin{align\*}
+  \alpha &= \atan2\left( \frac{r\_{32}}{\cos \beta}, \frac{r\_{33}}{\cos \beta} \right) \\\\\\
+  \beta &= \atan2\left( -r\_{31}, \pm \sqrt{r\_{11}^2 + r\_{21}^2} \right) \\\\\\
+  \gamma &= \atan2\left( \frac{r\_{21}}{\cos \beta}, \frac{r\_{11}}{\cos \beta} \right)
+\end{align\*}
+
+
+#### u-v-w Euler Angles {#u-v-w-euler-angles}
+
+Another way to describe the orientation of a moving object is to consider three successive rotations about the **coordinate axes of the moving frame**.
+Since the rotations do not occur about fixed axes, pre-multiplication of the individual rotation matrices fails to give the correct solution. 
It can be shown that the resulting matrix can be derived by post-multiplication of the individual rotation matrices as follows: +\\[ {}^A\bm{R}\_B(\alpha, \beta, \gamma) = \bm{R}\_u(\alpha) \bm{R}\_v(\beta) \bm{R}\_w(\gamma) \\] + +The inverse solution for the u-v-w Euler angles is the following (for \\(\cos \beta \ne 0\\)): + +\begin{align\*} + \alpha &= \atan2\left( -\frac{r\_{23}}{\cos \beta}, \frac{r\_{33}}{\cos \beta} \right) \\\\\\ + \beta &= \atan2\left( r\_{13}, \pm \sqrt{r\_{11}^2 + r\_{12}^2} \right) \\\\\\ + \gamma &= \atan2\left( -\frac{r\_{12}}{\cos \beta}, \frac{r\_{11}}{\cos \beta} \right) +\end{align\*} + + +#### w-v-w Euler Angles {#w-v-w-euler-angles} + +Similarly: +\\[ \bm{R}\_{wvw}(\alpha, \beta, \gamma) = \bm{R}\_w(\alpha) \bm{R}\_v(\beta) \bm{R}\_w(\gamma) \\] + +And for \\(\sin\beta\ne0\\): + +\begin{align\*} + \alpha &= \atan2\left( \frac{r\_{23}}{\sin\beta}, \frac{r\_{13}}{\sin\beta} \right) \\\\\\ + \beta &= \atan2\left( \pm \sqrt{r\_{31}^2 + r\_{32}^2}, r\_{33} \right) \\\\\\ + \gamma &= \atan2\left( \frac{r\_{32}}{\sin\beta}, -\frac{r\_{31}}{\sin\beta} \right) +\end{align\*} + + +#### w-u-w Euler Angles {#w-u-w-euler-angles} + +Here, the second rotation is about the \\(u\\) axis: +\\[ \bm{R}\_{wuw}(\alpha, \beta, \gamma) = \bm{R}\_w(\alpha) \bm{R}\_u(\beta) \bm{R}\_w(\gamma) \\] + +And for \\(\sin\beta\ne0\\): + +\begin{align\*} + \alpha &= \atan2\left( \frac{r\_{13}}{\sin\beta}, -\frac{r\_{23}}{\sin\beta} \right) \\\\\\ + \beta &= \atan2\left( \pm \sqrt{r\_{31}^2 + r\_{32}^2}, r\_{33} \right) \\\\\\ + \gamma &= \atan2\left( \frac{r\_{31}}{\sin\beta}, \frac{r\_{32}}{\sin\beta} \right) +\end{align\*} + + +#### Notes about Euler Angles {#notes-about-euler-angles} + +If the Euler angle is given, a **unique rotation matrix** is determined for the orientation of the rigid body. +However, the inverse map is not one-to-one, and at least two Euler angle sets can be found for each orientation. 
+ +If the Euler angle is chosen for the representation of the orientation, extra care should be taken. From the continuity of the motion, a suitable solution may be chosen, such that no abrupt changes are seen in the variation of the Euler angles in a typical maneuver. + +The use of rotation matrix to represent the orientation of a rigid body is then generally preferred although there are nine parameters for that description. + + +### Motion of a Rigid Body {#motion-of-a-rigid-body} + +Since the relative positions of a rigid body with respect to a moving frame \\(\\{\bm{B}\\}\\) attached to it is fixed for all time, it is sufficient to know the **position of the origin of the frame** \\(O\_B\\) and the **orientation of the frame** \\(\\{\bm{B}\\}\\) with respect to the fixed frame \\(\\{\bm{A}\\}\\), to represent the position of any point \\(P\\) in the space.
+ +Representation of the position of \\(O\_B\\) is uniquely given by the position vector, while orientation of the rigid body is represented in different forms. +However, for all possible orientation representation, a rotation matrix \\({}^A\bm{R}\_B\\) can be derived.
+ +
+
+ +Therefore, the location or **pose** of a rigid body, can be **fully determined** by: + +1. The **position vector** of point \\(O\_B\\) with respect to frame \\(\\{\bm{A}\\}\\) which is denoted \\({}^A\bm{P}\_{O\_B}\\) +2. The **orientation of the rigid body**, or the moving frame \\(\\{\bm{B}\\}\\) attached to it with respect to the fixed frame \\(\\{\bm{A}\\}\\), that is represented by \\({}^A\bm{R}\_B\\). + +
+ +The position of any point \\(P\\) of the rigid body with respect to the fixed frame \\(\\{\bm{A}\\}\\), which is denoted \\({}^A\bm{P}\\) may be determined thanks to the Chasles' theorem. + +
+
+ +If the pose of a rigid body \\(\\{{}^A\bm{R}\_B, {}^A\bm{P}\_{O\_B}\\}\\) is given, then the position of any point \\(P\\) of this rigid body with respect to \\(\\{\bm{A}\\}\\) is given by: + +\begin{equation} + {}^A\bm{P} = {}^A\bm{R}\_B {}^B\bm{P} + {}^A\bm{P}\_{O\_B} +\end{equation} + +
+ + +### Homogeneous Transformations {#homogeneous-transformations} + +To describe general transformations, we introduce the \\(4\times1\\) **homogeneous coordinates**, and Eq. [eq:chasles_therorem](#eq:chasles_therorem) is generalized to + +\begin{equation} + \tcmbox{{}^A\bm{P} = {}^A\bm{T}\_B {}^B\bm{P}} +\end{equation} + +in which \\({}^A\bm{T}\_B\\) is a \\(4\times4\\) **homogeneous transformation matrix**. + + +#### Homogeneous Coordinates {#homogeneous-coordinates} + +There are two basic classes of vector quantities, the generalization to homogeneous coordinates of which are different.
+ +The first type is called **line vector**. Line vectors refer to a vector of which its value depends on the line of action, or the position of where it is applied. Examples are the position vector, linear velocity, force vector.
+ +On the contrary, there exist quantities likes orientation that **hold for the whole rigid body** and do not correspond to a point. They can be positioned freely throughout the whole rigid body, without any change in their quantity. These types of vectors are called **free vectors**.
+ +For line vectors, both orientation and translation of the moving frame contribute to their value. +Homogeneous coordinate of such vectors is generated by appending \\(1\\) to the three components of that vector: + +\begin{equation} + \tcmbox{\bm{V} = \begin{bmatrix} v\_x \\ v\_y \\ v\_z \\ 1 \end{bmatrix}} +\end{equation} + +For free vectors, only the orientation of the moving frame contributes to their value. +The homogeneous coordinate is then + +\begin{equation} + \tcmbox{\bm{\omega} = \begin{bmatrix} \omega\_x \\ \omega\_y \\ \omega\_z \\ 0 \end{bmatrix}} +\end{equation} + + +#### Homogeneous Transformation Matrix {#homogeneous-transformation-matrix} + +
+
+ +The **homogeneous transformation matrix** is a \\(4\times4\\) matrix, defined for the purpose of transformation mapping of a vector in a homogeneous coordinate from one frame to another in a compact form. +The matrix is composed of the rotation matrix \\({}^A\bm{R}\_B\\) representing the orientation and the position vector \\({}^A\bm{P}\_{O\_B}\\) representing the translation. +It is partitioned as follows: + +\begin{equation} + {}^A\bm{T}\_B = + \left[ \begin{array}{ccc|c} + & & & \\\\\\ + & {}^A\bm{R}\_B & & {}^A\bm{P}\_{O\_B} \\\\\\ + & & & \\\\\\ + \hline + 0 & 0 & 0 & 1 \\\\\\ + \end{array} \right] +\end{equation} + +
+
+The homogeneous transformation matrix \\({}^A\bm{T}\_B\\) is a \\(4\times4\\) matrix operator mapping **vector valued** quantities represented by \\(4\times1\\) homogeneous coordinates:
+
+\begin{align\*}
+  \left[ \begin{array}{c} \\ {}^A\bm{P} \\ \\ \hline 1 \end{array} \right]
+  & =
+  \left[ \begin{array}{ccc|c}
+  & & & \\\\\\
+  & {}^A\bm{R}\_B & & {}^A\bm{P}\_{O\_B} \\\\\\
+  & & & \\\\\\
+  \hline
+  0 & 0 & 0 & 1 \\\\\\
+  \end{array} \right]
+  \left[ \begin{array}{c} \\ {}^B\bm{P} \\ \\ \hline 1 \end{array} \right] \\\\\\
+  {}^A\bm{P} &= {}^A\bm{R}\_B {}^B\bm{P} + {}^A\bm{P}\_{O\_B}
+\end{align\*}
+
+Using homogeneous coordinate for a **free vector** like angular velocity of a rigid body:
+
+\begin{align\*}
+  \left[ \begin{array}{c} \\ {}^A\bm{\omega} \\ \\ \hline 0 \end{array} \right]
+  & =
+  \left[ \begin{array}{ccc|c}
+  & & & \\\\\\
+  & {}^A\bm{R}\_B & & {}^A\bm{P}\_{O\_B} \\\\\\
+  & & & \\\\\\
+  \hline
+  0 & 0 & 0 & 1 \\\\\\
+  \end{array} \right]
+  \left[ \begin{array}{c} \\ {}^B\bm{\omega} \\ \\ \hline 0 \end{array} \right] \\\\\\
+  {}^A\bm{\omega} &= {}^A\bm{R}\_B {}^B\bm{\omega}
+\end{align\*}
+
+
+#### Screw Displacement {#screw-displacement}
+
+The most general rigid body displacement can be produced by a **translation along a line followed by a rotation about the same line**.
+The line is called the **screw axis**.
+
+There exist transformations from the screw displacement notation to the transformation matrix.
+
+
+#### Transformation Arithmetics {#transformation-arithmetics}
+
+
+##### Consecutive transformations {#consecutive-transformations}
+
+Let us consider the motion of a rigid body described at three locations (Figure [fig:consecutive_transformations](#fig:consecutive_transformations)).
+Frame \\(\\{\bm{A}\\}\\) represents the initial location, frame \\(\\{\bm{B}\\}\\) is an intermediate location, and frame \\(\\{\bm{C}\\}\\) represents the rigid body at its final location. 
+ + + +{{< figure src="/ox-hugo/taghirad13_consecutive_transformations.png" caption="Figure 5: Motion of a rigid body represented at three locations by frame \\(\\{\bm{A}\\}\\), \\(\\{\bm{B}\\}\\) and \\(\\{\bm{C}\\}\\)" >}} + +Furthermore, suppose the position vector of a point \\(P\\) of the rigid body is given in the final location, that is \\({}^C\bm{P}\\) is given, and the position of this point is to be found in the fixed frame \\(\\{\bm{A}\\}\\), that is \\({}^A\bm{P}\\). +Since the locations of the rigid body is known relative to each other, \\({}^C\bm{P}\\) can be transformed to \\({}^B\bm{P}\\) using \\({}^B\bm{T}\_C\\): +\\[{}^B\bm{P} = {}^B\bm{T}\_C {}^C\bm{P}\\] + +Now, \\({}^B\bm{P}\\) can be transformed into \\({}^A\bm{P}\\): +\\[ {}^A\bm{P} = {}^A\bm{T}\_B {}^B\bm{P} \\] + +And we have: +\\[ {}^A\bm{P} = {}^A\bm{T}\_B {}^B\bm{T}\_C {}^C\bm{P} \\] + +From which, the consecutive transformation can be defined as follows: + +\begin{equation} + \tcmbox{{}^A\bm{T}\_C = {}^A\bm{T}\_B {}^B\bm{T}\_C} +\end{equation} + + +##### Inverse transformation {#inverse-transformation} + +Direct inversion of the \\(4\times4\\) homogeneous transfer matrix \\({}^A\bm{T}\_B\\) to obtain \\({}^B\bm{T}\_A\\) might be computationally intensive. +It is much easier to use the specific structure of the transfer matrix for inversion. 
+ +To obtain \\({}^B\bm{T}\_A\\), it is necessary to compute \\({}^B\bm{R}\_A\\) and \\({}^B\bm{P}\_{O\_A}\\) from the known \\({}^A\bm{R}\_B\\) and \\({}^A\bm{P}\_{O\_B}\\), then + +\begin{equation\*} + {}^B\bm{T}\_A = + \left[ \begin{array}{ccc|c} + & & & \\\\\\ + & {}^B\bm{R}\_A & & {}^B\bm{P}\_{O\_A} \\\\\\ + & & & \\\\\\ + \hline + 0 & 0 & 0 & 1 \\\\\\ + \end{array} \right] +\end{equation\*} + +Moreover + +\begin{align\*} + {}^B\bm{R}\_A &= {}^A\bm{R}\_B^T \\\\\\ + {}^B\bm{P}\_{O\_A} &= {}^B\bm{R}\_A {}^A\bm{P}\_{O\_A} = - {}^B\bm{R}\_A {}^A\bm{P}\_{O\_B} \\\\\\ + &= -{}^A\bm{R}\_B^T {}^A\bm{P}\_{O\_B} +\end{align\*} + +Hence, the **inverse of the transformation matrix** can be obtain by + +\begin{equation} + {}^B\bm{T}\_A = {}^A\bm{T}\_B^{-1} = + \left[ \begin{array}{ccc|c} + & & & \\\\\\ + & {}^A\bm{R}\_B^T & & -{}^A \bm{R}\_B^T {}^A\bm{P}\_{O\_B} \\\\\\ + & & & \\\\\\ + \hline + 0 & 0 & 0 & 1 \\\\\\ + \end{array} \right] +\end{equation} + + +## Kinematics {#kinematics} + + + + +### Introduction {#introduction} + +
+
+ +Kinematic analysis refers to the study of the geometry of motion of a robot, without considering the forces an torques that cause the motion. +In this analysis, the relation between the geometrical parameters of the manipulator with the final motion of the moving platform is derived and analyzed. + +
+ +A **parallel robot** is a mechanism with a number of **closed kinematic chains**, and its moving platform is linked to the base by several independent kinematic chains. +Parallel robots for which the number of kinematic chains is equal to the number of degrees-of-freedom of the moving platform are called **fully parallel robots**.
+
+If, in addition to this condition, the type and number of joints at each limb, and the number and location of the actuated joints are identical in all the limbs, such a parallel robot is called **symmetric**.
+
+There are three main cases for fully parallel manipulators.
+Planar robots with two translational and one rotational degree-of-freedom in the plane.
+Spatial orientation manipulators with three rotational degrees-of-freedom in space.
+And a general spatial robot with three translational and three rotational degrees-of-freedom in space.
+
+It is known that unlike serial manipulators, **inverse kinematic analysis of parallel robots is usually simple and straightforward**.
+In most cases, limb variables may be computed independently using the given pose of the moving platform, and the solution in most cases even for redundant manipulators is uniquely determined.
+However, **forward kinematics of parallel manipulators is generally very complicated**, and its solution usually involves systems of nonlinear equations, which are highly coupled and in general have no closed form and unique solution.
+
+
+### Loop Closure Method {#loop-closure-method}
+
+A typical parallel manipulator consists of two main bodies.
+Body \\(A\\) is arbitrarily designated as fixed and is called the **base**, while body \\(B\\) is designated to be movable and is called the **moving platform**.
+These two bodies are coupled via \\(n\\) **limbs**, each attached to points \\(A\_i\\) and \\(B\_i\\) and called fixed and moving attachment points of the limb \\(i\\).
+ +At the **displacement** level, the **forward kinematic** problem permits the determination of the actual location or pose of the moving platform relative to the base from a set of joint-position readouts. + +At the **velocity** level, the **forward kinematic** problem refers to the determination of the translational and angular velocities of the moving platform relative to the base, from a set of joint-velocity readouts and for a known configuration.
+ +To describe the motion of the moving platform relative to the base, frame \\(\\{\bm{A}\\}\\) is attached to body \\(A\\) and frame \\(\\{\bm{B}\\}\\) to body \\(B\\). +The pose of the moving platform relative to the base is thus defined by: + +- A position vector \\(\bm{p}\\) which denotes the position vector of the origin of \\(\\{\bm{B}\\}\\) with respect to frame \\(\\{\bm{A}\\}\\) +- A \\(3\times3\\) rotation matrix \\(R\\) which denotes the rotation of \\(\\{\bm{B}\\}\\) with respect to \\(\\{\bm{A}\\}\\) + +Each limb of a parallel manipulator defines a kinematic loop passing through the origins of frames \\(\\{\bm{A}\\}\\) and \\(\\{\bm{B}\\}\\), and through the two limb attachment points \\(A\_i\\) and \\(B\_i\\). + +At the displacement level, the **closure of each kinematic loop** can be express in the vector form as +\\[ \vec{AB} = \vec{AA\_i} + \vec{A\_iB\_i} - \vec{BB\_i} \quad \text{for } i = 1,2,\dots,n \\] +in which \\(\vec{AA\_i}\\) and \\(\vec{BB\_i}\\) can be easily obtained from the geometry of the attachment points in the base and in the moving platform. + +Let us defined \\(\bm{a}\_i = \vec{AA\_i}\\) in the fixed frame \\(\\{\bm{A}\\}\\), and \\(\bm{b}\_i = \vec{BB\_i}\\) in the moving frame \\(\\{\bm{B}\\}\\). +Furthermore, \\(\bm{q}\_i = \vec{A\_iB\_i}\\) is defined as the **limb variable**, which indicated the geometry of the limb. + +
+
+ +The **loop closure** can be written as the unknown pose variables \\(\bm{p}\\) and \\(\bm{R}\\), the position vectors describing the known geometry of the base and of the moving platform, \\(\bm{a}\_i\\) and \\(\bm{b}\_i\\), and the limb vector \\(\bm{q}\_i\\) + +\begin{equation} + \bm{p} = \bm{a}\_i + \bm{q}\_i - \bm{R} \bm{b}\_i \quad \text{for } i=1,2,\dots,n +\end{equation} + +
+
+For an **inverse kinematic problem**, it is assumed that the moving platform position \\(\bm{p}\\) and orientation \\(\bm{R}\\) are given and the problem is to solve the active limb variables.
+This analysis is usually straightforward and results in a unique solution for the limb variables.
+
+However, the forward kinematic problem is not straightforward, and numerical methods are usually used to obtain its solution.
+
+
+### Kinematic Analysis of a Stewart-Gough Platform {#kinematic-analysis-of-a-stewart-gough-platform}
+
+
+#### Mechanism Description {#mechanism-description}
+
+One frame \\(\\{\bm{A}\\}\\) is attached to the fixed base and frame \\(\\{\bm{B}\\}\\) is attached to the moving platform at points \\(O\_A\\) and \\(O\_B\\) respectively.
+
+The number of actuators is equal to the degrees-of-freedom of the manipulator and hence the manipulator is **fully parallel**.
+
+ +Since all the limbs are connected to the moving platform and to the base by spherical joints, **no twisting torque** can be transmitted and the force acting on each limb is directed along the longitudinal axis of the limb. + +
+ + +#### Geometry of the Manipulator {#geometry-of-the-manipulator} + +
+
+ +The position of the attachment points on the fixed base are denoted by the vectors \\(\bm{a}\_i\\) and the position of moving attachment points are denoted by the vectors \\(\bm{b}\_i\\). +The geometry of each limb is described by its length \\(l\_i\\) and its direction is denoted by a unit vector \\(\hat{\bm{s}}\_i\\). + +
+
+The position of the point \\(O\_B\\) of the moving platform is described by the **position vector** \\({}^A\bm{P} = [p\_x\ p\_y\ p\_z]^T\\) and orientation of the moving platform is described by the **rotation matrix** \\({}^A\bm{R}\_B\\) which can be represented by the components of the unit vectors \\(\hat{\bm{u}}\\), \\(\hat{\bm{v}}\\), \\(\hat{\bm{w}}\\) as follows:
+
+\begin{equation}
+  ^A\bm{R}\_B = \begin{bmatrix}
+    u\_x & v\_x & w\_x \\\\\\
+    u\_y & v\_y & w\_y \\\\\\
+    u\_z & v\_z & w\_z \\\\\\
+  \end{bmatrix}
+\end{equation}
+
+
+<a id="fig:stewart_schematic"></a>
+
+{{< figure src="/ox-hugo/taghirad13_stewart_schematic.png" caption="Figure 6: Geometry of a Stewart-Gough platform" >}}
+
+The geometry of the manipulator is shown in Figure [fig:stewart_schematic](#fig:stewart_schematic).
+
+ +For **inverse kinematic analysis**, it is assumed that the position \\({}^A\bm{P}\\) and orientation of the moving platform \\({}^A\bm{R}\_B\\) are given and the problem is to obtain the joint variables \\(\bm{L} = \left[ l\_1, l\_2, l\_3, l\_4, l\_5, l\_6 \right]^T\\). + +
+ +From the geometry of the manipulator, one can write: + +\begin{equation} + {}^A \bm{a}\_i + l\_i {}^A \hat{\bm{s}}\_i = {}^A\bm{P} + {}^A\bm{b}\_i +\end{equation} + +Then, we can find \\(l\_i\\) given \\({}^A\bm{P}\\) and \\({}^A\bm{R}\_B\\): + +\begin{equation} + \begin{aligned} + l\_i = &\Big[ {}^A\bm{P}^T {}^A\bm{P} + {}^B\bm{b}\_i^T {}^B\bm{b}\_i + {}^A\bm{a}\_i^T {}^A\bm{a}\_i - 2 {}^A\bm{P}^T {}^A\bm{a}\_i + \dots\\\\\\ + &2 {}^A\bm{P}^T \left[{}^A\bm{R}\_B {}^B\bm{b}\_i\right] - 2 \left[{}^A\bm{R}\_B {}^B\bm{b}\_i\right]^T {}^A\bm{a}\_i \Big]^{1/2} + \end{aligned} +\end{equation} + +If the position and orientation of the platform lie in the feasible workspace, the solution is unique. +Otherwise, the solution gives complex numbers. + + +#### Forward Kinematics {#forward-kinematics} + +
+
+ +In **forward kinematic analysis**, it is assumed that the vector of limb lengths \\(\bm{L}\\) is given and the problem is to find the position \\({}^A\bm{P}\\) and the orientation \\({}^A\bm{R}\_B\\). + +
+ +The size of the problem depends of the representation used for orientation (rotation matrix, Euler angles, ...). + +The forward kinematic problem is then to solve many **highly nonlinear equations** that are extremely difficult to solve. + +The complexity of the problem depends widely on the manipulator architecture and geometry. + + +## Jacobian: Velocities and Static Forces {#jacobian-velocities-and-static-forces} + + + + +### Introduction {#introduction} + +
+
+ +The Jacobian matrix not only reveals the **relation between the joint variable velocities of a parallel manipulator to the moving platform linear and angular velocities**, it also constructs the transformation needed to find the **actuator forces from the forces and moments acting on the moving platform**. + +
+ +For specific configurations, **local degeneracy** can occur and leads to: + +1. An instantaneous change in the degrees-of-freedom of the system and hence a **loss of controllability** +2. An important **degradation of the natural stiffness** that may lead to very high joint forces or torques + +Therefore, it is very important to **identify singular configurations** at the design stage to improve the performance. + + +### Angular and Linear Velocities {#angular-and-linear-velocities} + +To determine the absolute linear velocity of a point, the derivative must be calculated relative to a **fixed** frame. +Differentiation of a position vector with respect to a moving frame results in a relative velocity. +Therefore, it is necessary to define the arithmetics to transform the relative velocities to the absolute ones. + + +#### Angular Velocity of a Rigid Body {#angular-velocity-of-a-rigid-body} + +Angular velocity is an attribute of a rigid body that describes the rotational motion of the frame \\(\\{\bm{B}\\}\\) that is attached to the rigid body. + +
+
+ +The **angular velocity vector** \\(\bm{\Omega}\\) describes the instantaneous rotation of frame \\(\\{\bm{B}\\}\\) with respect to the fixed frame \\(\\{\bm{A}\\}\\). +The direction of \\(\bm{\Omega}\\) indicates the instantaneous axis of rotation and its magnitude indicates the speed of rotation. + +
+ +The angular velocity vector is related to the screw formalism by equation [eq:angular_velocity_vector](#eq:angular_velocity_vector). + +\begin{equation} + \tcmbox{\bm{\Omega} \triangleq \dot{\theta} \hat{\bm{s}}} +\end{equation} + +The angular velocity can be expressed in any frame. For example \\({}^A\bm{\Omega}\\) denotes the angular velocity of the rigid body expressed in the frame \\(\\{\bm{A}\\}\\) and we have: + +\begin{equation} + \begin{aligned} + ^A \bm{\Omega} &= \Omega\_x \hat{\bm{x}} + \Omega\_y \hat{\bm{y}} + \Omega\_z \hat{\bm{z}} \\\\\\ + &= \dot{\theta}\left( s\_x \hat{\bm{x}} + s\_y \hat{\bm{y}} + s\_z \hat{\bm{z}} \right) + \end{aligned} +\end{equation} + +in which \\(\Omega\_x\\), \\(\Omega\_y\\) and \\(\Omega\_z\\) are the three components of angular velocity of a rigid body expressed in frame \\(\\{\bm{A}\\}\\). + + +#### Linear Velocity of a Point {#linear-velocity-of-a-point} + +Linear velocity of a point P can be easily determined by the time derivative of the position vector \\(p\\) of that point with respect to a fixed frame: + +\begin{equation} + v\_p = \dot{p} = \left( \dv{p}{t} \right)\_{\text{fix}} +\end{equation} + +If the variation of the position vector is determined with respect to a moving frame, we obtain the relative velocity: + +\begin{equation} + v\_{\text{rel}} = \left( \pdv{p}{t} \right)\_{\text{mov}} +\end{equation} + +In classical mechanics, it is shown that the relation between absolute derivative of any vector to its relative derivative is given by: + +\begin{equation} + \left( \dv{(\cdot)}{t} \right)\_{\text{fix}} = \left( \pdv{(\cdot)}{t} \right)\_{\text{mov}} + \bm{\Omega} \times (\cdot) +\end{equation} + +in which \\(\bm{\Omega}\\) denotes the angular velocity of the moving frame with respect to the fixed frame. 
+ +The term \\(\bm{\Omega}\times(\cdot)\\) can be written in matrix form: + +\begin{equation} + \tcmbox{\left( \dv{(\cdot)}{t} \right)\_{\text{fix}} = \left( \pdv{(\cdot)}{t} \right)\_{\text{mov}} + \bm{\Omega}^\times(\cdot)} +\end{equation} + +The matrix \\(\bm{\Omega}^\times\\) denotes a **skew-symmetric matrix** defined by: + +\begin{equation} + \tcmbox{\bm{\Omega}^\times = \begin{bmatrix} + 0 & -\Omega\_z & \Omega\_y \\\\\\ + \Omega\_z & 0 & -\Omega\_x \\\\\\ + -\Omega\_y & \Omega\_x & 0 + \end{bmatrix}} +\end{equation} + +Now consider the general motion of a rigid body shown in Figure [fig:general_motion](#fig:general_motion), in which a moving frame \\(\\{\bm{B}\\}\\) is attached to the rigid body and **the problem is to find the absolute velocity** of point \\(P\\) with respect to a fixed frame \\(\\{\bm{A}\\}\\). + + + +{{< figure src="/ox-hugo/taghirad13_general_motion.png" caption="Figure 7: Instantaneous velocity of a point \\(P\\) with respect to a moving frame \\(\\{\bm{B}\\}\\)" >}} + +The rigid body perform a general motion which is a combination of a translation, denoted by the vector \\({}^A\bm{P}\_{O\_B}\\), and an instantaneous rotation. +To determine the velocity of point \\(P\\), we start with the relation between absolute and relative position vectors: +\\[ ^A\bm{P} = {}^A\bm{P}\_{O\_B} + {}^A\bm{R}\_B {}^B\bm{P} \\] + +To derive the velocity of point \\(P\\), we differentiate with respect to time: +\\[ {}^A\bm{v}\_P = {}^A\bm{v}\_{O\_B} + {}^A\dot{\bm{R}}\_B{}^B\bm{P} + {}^A\bm{R}\_B\underbrace{{}^B\bm{v}\_P}\_{=0} \\] + +The time derivative of the rotation matrix \\({}^A\dot{\bm{R}}\_B\\) is: + +\begin{equation} + \tcmbox{{}^A\dot{\bm{R}}\_B = {}^A\bm{\Omega}^\times \ {}^A\bm{R}\_B} +\end{equation} + +And we finally obtain equation [eq:absolute_velocity_formula](#eq:absolute_velocity_formula). + +
+
+ +\begin{equation} + {}^A\bm{v}\_P = {}^A\bm{v}\_{O\_B} + {}^A\bm{\Omega}^\times \ {}^A\bm{R}\_B {}^B\bm{P} +\end{equation} + +
+ + +#### Screw Coordinates {#screw-coordinates} + +Finite rotation of a rigid body can be expressed as a rotation \\(\theta\\) about a screw axis \\(\hat{\bm{s}}\\). +Furthermore, it is shown that the angular velocity of a rigid body is also defined as the rate of instantaneous rotation angle \\(\dot{\theta}\\) about the same screw axis \\(\hat{\bm{s}}\\). + +
+
+ +The most general rigid-body displacement can be produced by a translation along a line followed by a rotation about the same line. Since this displacement is reminiscent of the displacement of a screw, it is called a **screw displacement**, and the line of axis is called the **screw axis**. + +
+ + +### Jacobian Matrices of a Parallel Manipulator {#jacobian-matrices-of-a-parallel-manipulator} + +Let \\(\bm{q} = \left[ q\_1, q\_2, \ldots, q\_m \right]^T\\) denote the vector of actuated joint coordinates (linear displacement of an actuator prismatic joint or angular rotation of an actuated revolute joint) and \\(\bm{\mathcal{X}} = \left[ x\_1, x\_2, \ldots, x\_n \right]^T\\) denote the vector of moving platform motion variables (position or orientation). + +\\(m\\) denotes the number of actuated joints in the manipulator, \\(n\\) denotes the number of degrees-of-freedom of the manipulator. + +Generally \\(m \geq n\\), in which for a fully parallel manipulator \\(m=n\\) and for redundant manipulator \\(m>n\\).
+ +\\(\bm{q}\\) and \\(\bm{\mathcal{X}}\\) are related through a system of **nonlinear algebraic equations** representing the **kinematic constraints imposed by the limbs**, which can be generally written as + +\begin{equation} + f(\bm{q}, \bm{\mathcal{X}}) = 0 +\end{equation} + +We can differentiate this equation with respect to time and obtain: + +\begin{equation} + \tcmbox{\bm{J}\_x \dot{\bm{\mathcal{X}}} = \bm{J}\_q \dot{\bm{q}}} +\end{equation} + +where + +\begin{equation} + \bm{J}\_x = \pdv{f}{\bm{\mathcal{X}}} \quad \text{and} \quad \bm{J}\_q = -\pdv{f}{\bm{q}} +\end{equation} + +
+
+ +The **general Jacobian matrix** is defined as: + +\begin{equation} + \dot{\bm{q}} = \bm{J} \dot{\bm{\mathcal{X}}} +\end{equation} + +From equation [eq:jacobians](#eq:jacobians), we have: + +\begin{equation} + \bm{J} = {\bm{J}\_q}^{-1} \bm{J}\_x +\end{equation} + +
+ + +### Velocity Loop Closure {#velocity-loop-closure} + +The **velocity loop closures** are used for **obtaining the Jacobian matrices** in a straightforward manner. +Velocity loop closures are derived by direct differentiation of kinematic loop closures.
+ +Kinematic loop closures are: +\\[ \bm{p} = \bm{a}\_i + \bm{d}\_i - \bm{R} \bm{b}\_i \quad \text{for}\ i = 1, \ldots, m \\] + +with + +- \\(\bm{p}\\) the position vector of the moving platform w.r.t. frame \\(\\{\bm{A}\\}\\) +- \\(\bm{R}\\) the rotation matrix of the moving platform +- \\(\bm{a}\_i\\) the position vector of the \\(i\\)'th limb of the fixed platform w.r.t. frame \\(\\{\bm{A}\\}\\) +- \\(\bm{b}\_i\\) the position vector of the \\(i\\)'th limb of the moving platform w.r.t. frame \\(\\{\bm{B}\\}\\) +- \\(\bm{d}\_i\\) the limb vector + +By taking the time derivative, we obtain the following **Velocity Loop Closure**: + +\begin{equation} + \tcmbox{\dot{\bm{p}} = \dot{\bm{d}\_i} - \bm{\omega} \times \bm{R} \bm{b}\_i \quad \text{for}\ i = 1, \ldots, m} +\end{equation} + + +### Singularity Analysis of Parallel Manipulators {#singularity-analysis-of-parallel-manipulators} + +The singularities occur when: + +- \\(\bm{J}\_q\\) is rank deficient (Inverse kinematic singularity) +- \\(\bm{J}\_x\\) is rank deficient (Forward kinematic singularity) + + +#### Inverse Kinematic Singularity {#inverse-kinematic-singularity} + +Inverse kinematic singularity happens when \\(\bm{J}\_q\\) (\\(m \times m\\) matrix) is rank deficient (\\(\det \bm{J}\_q = 0\\)). + +The corresponding configurations are located at the boundary of the manipulator workspace or on the internal boundaries between sub-regions of the workspace. + +In such cases, there exist nonzero vectors \\(\dot{\bm{q}}\\) which correspond to a null Cartesian twist vector \\(\dot{\bm{\mathcal{X}}}\\). In other words, infinitesimal motion of the moving platform along certain directions cannot be accomplished. +**The manipulator looses one or more degrees-of-freedom**. + + +#### Forward Kinematic Singularity {#forward-kinematic-singularity} + +Forward kinematic singularity happens when \\(\bm{J}\_x\\) (\\(m \times n\\) matrix) is rank deficient (\\(\det({\bm{J}\_x}^T \bm{J}\_x) = 0\\)). 
+If the manipulator is not redundantly actuated (\\(m=n\\)), then the Jacobian matrix \\(\bm{J}\_x\\) is square and the forward kinematic singularity happens when \\(\det \bm{J}\_x = 0\\).
+
+The degeneracy occurs inside the manipulator's Cartesian workspace and corresponds to the set of configurations for which two different branches of the forward kinematic problem meet.
+
+There exist nonzero Cartesian twist vectors \\(\dot{\bm{\mathcal{X}}}\\) that are mapped into a vanishing actuator velocity vector \\(\dot{\bm{q}}\\).
+
+The corresponding configuration will be one in which an infinitesimal motion of the platform is possible even if the actuators are locked. **The manipulator gains one or several degrees-of-freedom and its stiffness vanishes in the corresponding direction(s)**.
+
+
+### Jacobian Analysis of the Stewart-Gough Platform {#jacobian-analysis-of-the-stewart-gough-platform}
+
+
+#### Velocity Loop Closure {#velocity-loop-closure}
+
+The input joint rate is denoted by \\(\dot{\bm{\mathcal{L}}} = [ \dot{l}\_1, \dot{l}\_2, \dot{l}\_3, \dot{l}\_4, \dot{l}\_5, \dot{l}\_6 ]^T\\), and the output twist vector is denoted by \\(\dot{\bm{\mathcal{X}}} = [^A\bm{v}\_p, {}^A\bm{\omega}]^T\\).
+
+The Jacobian matrix can be derived by formulating a velocity loop closure equation of each limb.
+The loop closure equations for each limb are:
+
+\begin{equation}
+ {}^A\bm{P} + {}^A\bm{R}\_B {}^B\bm{b}\_i = l\_i {}^A\hat{\bm{s}}\_i + {}^A\bm{a}\_i
+\end{equation}
+
+By differentiating this with respect to time:
+
+\begin{equation}
+ {}^A\bm{v}\_p + {}^A \dot{\bm{R}}\_B {}^B\bm{b}\_i = \dot{l}\_i {}^A\hat{\bm{s}}\_i + l\_i {}^A\dot{\hat{\bm{s}}}\_i
+\end{equation}
+
+Moreover, we have:
+
+- \\({}^A\dot{\bm{R}}\_B {}^B\bm{b}\_i = {}^A\bm{\omega} \times {}^A\bm{R}\_B {}^B\bm{b}\_i = {}^A\bm{\omega} \times {}^A\bm{b}\_i\\) in which \\({}^A\bm{\omega}\\) denotes the angular velocity of the moving platform expressed in the fixed frame \\(\\{\bm{A}\\}\\).
+- \\(l\_i {}^A\dot{\hat{\bm{s}}}\_i = l\_i \left( {}^A\bm{\omega}\_i \times \hat{\bm{s}}\_i \right)\\) in which \\({}^A\bm{\omega}\_i\\) is the angular velocity of limb \\(i\\) expressed in the fixed frame \\(\\{\bm{A}\\}\\).
+
+Then, the velocity loop closure [eq:loop_closure_limb_diff](#eq:loop_closure_limb_diff) simplifies to
+\\[ {}^A\bm{v}\_p + {}^A\bm{\omega} \times {}^A\bm{b}\_i = \dot{l}\_i {}^A\hat{\bm{s}}\_i + l\_i ({}^A\bm{\omega}\_i \times \hat{\bm{s}}\_i) \\]
+
+By dot-multiplying both sides of the equation by \\(\hat{\bm{s}}\_i\\):
+\\[ \hat{\bm{s}}\_i \cdot {}^A\bm{v}\_p + ({}^A\bm{b}\_i \times \hat{\bm{s}}\_i) \cdot {}^A\bm{\omega} = \dot{l}\_i \\]
+
+We then omit the superscript \\(A\\) and we can rearrange the 6 equations into a matrix form
+
+\begin{equation}
+ \tcmbox{\dot{\bm{\mathcal{L}}} = \bm{J} \dot{\bm{\mathcal{X}}}}
+\end{equation}
+
+
+
+ +\begin{equation} + \bm{J} = \begin{bmatrix} + {\hat{\bm{s}}\_1}^T & (\bm{b}\_1 \times \hat{\bm{s}}\_1)^T \\\\\\ + {\hat{\bm{s}}\_2}^T & (\bm{b}\_2 \times \hat{\bm{s}}\_2)^T \\\\\\ + {\hat{\bm{s}}\_3}^T & (\bm{b}\_3 \times \hat{\bm{s}}\_3)^T \\\\\\ + {\hat{\bm{s}}\_4}^T & (\bm{b}\_4 \times \hat{\bm{s}}\_4)^T \\\\\\ + {\hat{\bm{s}}\_5}^T & (\bm{b}\_5 \times \hat{\bm{s}}\_5)^T \\\\\\ + {\hat{\bm{s}}\_6}^T & (\bm{b}\_6 \times \hat{\bm{s}}\_6)^T + \end{bmatrix} +\end{equation} + +\\(\bm{J}\\) then **depends only** on: + +- \\(\hat{\bm{s}}\_i\\) the orientation of the limbs +- \\(\bm{b}\_i\\) the position of the joints with respect to \\(O\_B\\) and express in \\(\\{\bm{A}\\}\\). + +
+ + +#### Singularity Analysis {#singularity-analysis} + +It is of primary importance to avoid singularities in a given workspace. +To study the singularity configurations of the Stewart-Gough platform, we consider the Jacobian matrix determined with the equation [eq:jacobian_formula_stewart](#eq:jacobian_formula_stewart).
+ +From equation [eq:jacobians](#eq:jacobians), it is clear that for the Stewart-Gough platform, \\(\bm{J}\_q = \bm{I}\\) and \\(\bm{J}\_x = \bm{J}\\). +Hence the manipulator has **no inverse kinematic singularities** within the manipulator workspace, but **may possess forward kinematic singularity** when \\(\bm{J}\\) becomes rank deficient. This may occur when +\\[ \det \bm{J} = 0 \\] + + +### Static Forces in Parallel Manipulators {#static-forces-in-parallel-manipulators} + +The relation between the forces/moments applied to the environment at the point of contact and the actuator forces/torques is determined and analyzed in the study of static force analysis.
+ +It is assumed that the manipulator is at a static equilibrium, and that the actuator forces required to produce the desired contact forces are determined.
+ +Two methods are usually applied: the **free-body diagram** and the **principle of virtual work**.
+ +In the **free-body diagram approach**, the actuator forces are determined to produce desired contact forces/moments as well as the internal and interacting forces/torques applied at the limbs. The analysis of such forces is essential in the design of a manipulator to determine the stresses and deflection of each link and joint.
+ +However, if only the actuator forces are desired to be determined, the **principle of virtual work** is more efficient and computationally less expensive. + + +#### Virtual Work Approach {#virtual-work-approach} + +A virtual displacement for a parallel manipulator refers to an infinitesimal change in the general displacement of the moving platform as a result of any arbitrary infinitesimal changes in the joint variables at a given instant of time.
+ +The virtual displacement of the joints can be written as \\(\delta \bm{q} = [\delta q\_1, \delta q\_2, \cdots, \delta q\_m]^T\\) and \\(\delta \bm{\mathcal{X}} = [\delta x, \delta y, \delta z, \delta \theta\_x, \delta \theta\_y, \delta \theta\_z ]^T\\) denotes the virtual displacement of a contacting point of the moving platform. +\\([\delta \theta\_x, \delta \theta\_y, \delta \theta\_z ]^T = \delta \theta \hat{\bm{s}}\\) are the orientation variables represented by screw coordinates.
+ +Let the vector of actuator forces be denoted by \\(\bm{\tau} = [\tau\_1, \tau\_2, \cdots, \tau\_m]^T\\), and the external forces/torque acting on the contact point of the moving platform denoted by a wrench in a screw coordinate as \\(\bm{\mathcal{F}} = [\bm{f}, \bm{n}]^T\\) in which \\(\bm{f} = [f\_x, f\_y, f\_z]^T\\) denotes the external forces, and \\(\bm{n} = [n\_x, n\_y, n\_z]^T\\) denotes the external torque action on the moving platform at the point of contact to the environment.
+ +We assume that the frictional forces acting on the joints are negligible, and also that the gravitational forces of the limb links are much smaller than the interacting force of the moving platform to the environment. +The principle of virtual work states that the total virtual work, \\(\delta W\\), done by all actuators and external forces is equal to zero: + +\begin{equation} + \tcmbox{\delta W = \bm{\tau}^T \delta \bm{q} - \bm{\mathcal{F}}^T \delta \bm{\mathcal{X}} = 0} +\end{equation} + +Furthermore, from the definition of the Jacobian, the virtual displacements \\(\delta \bm{q}\\) and \\(\delta \bm{\mathcal{X}}\\) are related by the Jacobian: +\\[ \delta \bm{q} = \bm{J} \cdot \delta \bm{\mathcal{X}} \\] + +We then have \\(\left( \bm{\tau}^T \bm{J} - \bm{\mathcal{F}}^T \right) \delta \bm{\mathcal{X}} = 0\\) that holds for any arbitrary virtual displacement \\(\delta \bm{\mathcal{X}}\\), hence +\\[ \bm{\tau}^T \bm{J} - \bm{\mathcal{F}}^T = 0 \\] + +
+
+ +We obtain that the **Jacobian matrix** constructs the **transformation needed to find the actuator forces** \\(\bm{\tau}\\) **from the wrench acting on the moving platform** \\(\bm{\mathcal{F}}\\): + +\begin{equation} + \bm{\mathcal{F}} = \bm{J}^T \bm{\tau} +\end{equation} + +
+ + +#### Static Forces of the Stewart-Gough Platform {#static-forces-of-the-stewart-gough-platform} + +As shown in Figure [fig:stewart_static_forces](#fig:stewart_static_forces), the twist of moving platform is described by a 6D vector \\(\dot{\bm{\mathcal{X}}} = \left[ {}^A\bm{v}\_P \ {}^A\bm{\omega} \right]^T\\), in which \\({}^A\bm{v}\_P\\) is the velocity of point \\(O\_B\\), and \\({}^A\bm{\omega}\\) is the angular velocity of moving platform.
+
+
+
+{{< figure src="/ox-hugo/taghirad13_stewart_static_forces.png" caption="Figure 8: Free-body diagram of forces and moments acting on the moving platform and each limb of the Stewart-Gough platform" >}}
+
+Consider an external wrench generated by the manipulator and applied to the environment at the point \\(O\_B\\) denoted by \\(\bm{\mathcal{F}} = [\bm{f} \ \bm{n}]^T\\).
+
+It is assumed that no external forces are applied to the limbs except the actuator forces.
+Therefore, the static force can be assumed to be **along the limb axis** \\(\hat{\bm{s}}\_i\\), and the limb is subject to a tension/compression force \\(f\_i\\).
+
+At static equilibrium, the summation of all acting forces on the moving platform shall be zero, therefore
+
+\begin{equation}
+ -\bm{f} + \sum\_{i=1}^6 f\_i \hat{\bm{s}}\_i = 0
+\end{equation}
+
+in which \\(-\bm{f}\\) is the **external force** applied to the moving platform from the environment.
+ +The summation of moments contributed by all forces acting on the moving platform about \\(O\_B\\) is as follows: + +\begin{equation} + -\bm{n} + \sum\_{i=1}^6 b\_i \times f\_i \hat{\bm{s}}\_i = 0 +\end{equation} + +in which \\(-n\\) is the **external moment** applied to the moving platform by the environment, and \\(b\_i\\) is the position vector from the point \\(O\_B\\) to the attached point \\(B\_i\\) on the moving platform.
+ +Writing the two equations together in a matrix form results in + +\begin{equation} + \begin{bmatrix} + \hat{\bm{s}}\_1 & \hat{\bm{s}}\_2 & \cdots & \hat{\bm{s}}\_6 \\\\\\ + \bm{b}\_1 \times \hat{\bm{s}}\_1 & \bm{b}\_2 \times \hat{\bm{s}}\_2 & \cdots & \bm{b}\_6 \times \hat{\bm{s}}\_6 + \end{bmatrix} \cdot \begin{bmatrix} + f\_1 \\ f\_2 \\ \vdots \\ f\_6 + \end{bmatrix} = \begin{bmatrix} + \bm{f} \\ \bm{n} + \end{bmatrix} +\end{equation} + +There we can recognize the transpose of the Jacobian matrix: + +\begin{equation} + \tcmbox{\bm{\mathcal{F}} = \bm{J}^T \bm{\tau}} +\end{equation} + +in which \\(\bm{\tau} = [f\_1, f\_2, \cdots, f\_6]^T\\) is the vector of actuator forces, and \\(\bm{\mathcal{F}} = [\bm{f}, \bm{n}]^T\\) is the 6D wrench applied by the manipulator to the environment. + + +### Stiffness Analysis of Parallel Manipulators {#stiffness-analysis-of-parallel-manipulators} + +Here, we focus on the deflections of the manipulator moving platform that are the result of the applied wrench to the environment. +The amount of these deflections are a function of the applied wrench as well as the manipulator **structural stiffness**. +Thus, the stiffness of a manipulator has a direct impact on its overall positioning accuracy if the manipulator is in contact with a stiff environment. + + +#### Stiffness and Compliance Matrices {#stiffness-and-compliance-matrices} + +The relation between the applied actuator force \\(\tau\_i\\) and the corresponding small deflection \\(\Delta q\_i\\) along the applied force axis can be approximated as a **linear function**: + +\begin{equation} + \tcmbox{\tau\_i = k\_i \cdot \Delta q\_i} +\end{equation} + +in which \\(k\_i\\) denotes the **stiffness constant of the actuator**.
+ +Re-writing the equation [eq:stiffness_actuator](#eq:stiffness_actuator) for all limbs in a matrix form result in + +\begin{equation} + \tcmbox{\bm{\tau} = \mathcal{K} \cdot \Delta \bm{q}} +\end{equation} + +in which \\(\bm{\tau}\\) is the vector of actuator forces, and \\(\Delta \bm{q}\\) corresponds to the actuator deflections. +\\(\mathcal{K} = \text{diag}\left[ k\_1 \ k\_2 \dots k\_m \right]\\) is an \\(m \times m\\) diagonal matrix composed of the actuator stiffness constants.
+ +Writing the Jacobian relation given in equation [eq:jacobian_disp](#eq:jacobian_disp) for infinitesimal deflection read + +\begin{equation} + \Delta \bm{q} = \bm{J} \cdot \Delta \bm{\mathcal{X}} +\end{equation} + +in which \\(\Delta \bm{\mathcal{X}} = [\Delta x\ \Delta y\ \Delta z\ \Delta\theta x\ \Delta\theta y\ \Delta\theta z]\\) is the infinitesimal linear and angular deflection of the moving platform. + +Furthermore, rewriting the Jacobian as the projection of actuator forces to the moving platform [eq:jacobian_forces](#eq:jacobian_forces) gives + +\begin{equation} + \bm{\mathcal{F}} = \bm{J}^T \bm{\tau} +\end{equation} + +Hence, by substituting [eq:stiffness_matrix_relation](#eq:stiffness_matrix_relation) and [eq:jacobian_disp_inf](#eq:jacobian_disp_inf) in [eq:jacobian_force_inf](#eq:jacobian_force_inf), we obtain: + +\begin{equation} + \tcmbox{\bm{\mathcal{F}} = \underbrace{\bm{J}^T \mathcal{K} \bm{J}}\_{\bm{K}} \cdot \Delta \bm{\mathcal{X}}} +\end{equation} + +Equation [eq:stiffness_jacobian](#eq:stiffness_jacobian) implies that the moving platform output wrench is related to its deflection by the **stiffness matrix** \\(K\\). + +
+
+ +\begin{equation} + \bm{K} = \bm{J}^T \mathcal{K} \bm{J} +\end{equation} + +
+
+The stiffness matrix has desirable characteristics for analysis:
+
+- It is a **symmetric positive definite matrix**, however, it is configuration dependent
+- If the manipulator actuators have all the same stiffness constants \\(k\\), the stiffness matrix is reduced to the form \\(\bm{K} = k \bm{J}^T \bm{J}\\)
+
+If the stiffness matrix is invertible (\\(\det( \bm{J}^T \bm{J}) \ne 0\\)), the **compliance matrix** of the manipulator is defined as
+
+\begin{equation}
+ \tcmbox{\bm{C} = \bm{K}^{-1} = (\bm{J}^T \mathcal{K} \bm{J})^{-1}}
+\end{equation}
+
+The compliance matrix of a manipulator shows the mapping of the moving platform wrench to its deflection by
+
+\begin{equation}
+ \Delta \bm{\mathcal{X}} = \bm{C} \cdot \bm{\mathcal{F}}
+\end{equation}
+
+
+#### Transformation Ellipsoid {#transformation-ellipsoid}
+
+As seen previously, the Jacobian matrix \\(\bm{J}\\) transforms n-dimensional moving platform velocity vector \\(\dot{\bm{\mathcal{X}}}\\) into m-dimensional actuated joint velocity \\(\dot{\bm{q}}\\).
+Also, the Jacobian transpose \\(\bm{J}^T\\) maps m-dimensional actuated joint forces \\(\bm{\tau}\\) into n-dimensional applied wrench \\(\bm{\mathcal{F}}\\).
+ +One way to **characterize these transformation** is to compare the amplitude and direction of the moving platform velocity generated by a **unit** actuator joint velocity. +To achieve this goal, we confine the actuator joint velocity vector on a m-dimensional unit sphere +\\[ \dot{\bm{q}}^T \dot{\bm{q}} = 1 \\] +and compare the resulting moving platform velocity in n-dimensional space: +\\[ \dot{\bm{\mathcal{X}}}^T \bm{J}^T \bm{J} \dot{\bm{\mathcal{X}}} = 1 \\] + +Similarly, we can confine the exerted moving platform wrench \\(\bm{\mathcal{F}}^T \bm{\mathcal{F}} = 1\\) and compare the required actuator forces: \\(\bm{\tau}^T \bm{J} \bm{J}^T \bm{\tau} = 1\\).
+ +Consider the case of fully parallel manipulators. +Then \\(\bm{J}\bm{J}^T\\) and \\(\bm{J}^T\bm{J}\\) transformations are represented by \\(n \times n\\) matrices. +Geometrically, these transformations represent a **hyper-ellipsoid** in n-dimensional space, whose principal axes are the **eigenvectors** of \\(\bm{J}^T\bm{J}\\) and \\(\bm{J}\bm{J}^T\\) respectively. +Furthermore, the lengths of the principal axes are equal to the reciprocals of the square roots of the eigenvalues of \\(\bm{J}\bm{J}^T\\) and \\(\bm{J}^T\bm{J}\\), which are also equal to the reciprocals of the **singular values** of \\(\bm{J}\\).
+ +The shape of this hyper-ellipsoid in space indicates the characteristics of the transformation. +As this hyper-ellipsoid is closer to a hyper-sphere, the transformation becomes more uniform in different directions. +Since Jacobian matrix is **configuration dependent**, the shape of the hyper-ellipsoid is also configuration dependent, and as the moving platform moves from one pose to the other, the shape of the hyper-ellipsoid changes accordingly.
+ +A measure of the dexterity of the manipulator is the reciprocal of the Jacobian matrix condition number defined as: + +\begin{equation} + \frac{1}{\kappa} = \frac{\sigma\_{\text{min}}}{\sigma\_{\text{max}}} +\end{equation} + +in which \\(\sigma\_{\text{min}}\\) and \\(\sigma\_{\text{max}}\\) are the smallest and the largest singular values of the Jacobian matrix. + + +#### Stiffness Analysis of the Stewart-Gough Platform {#stiffness-analysis-of-the-stewart-gough-platform} + +In this section, we restrict our analysis to a 3-6 structure (Figure [fig:stewart36](#fig:stewart36)) in which there exist six distinct attachment points \\(A\_i\\) on the fixed base and three moving attachment point \\(B\_i\\). + + + +{{< figure src="/ox-hugo/taghirad13_stewart36.png" caption="Figure 9: Schematic of a 3-6 Stewart-Gough platform" >}} + +Denote the vector of actuated joint forces by \\(\bm{\tau} = [f\_1 \ f\_2 \ f\_3 \ f\_4 \ f\_5 \ f\_6 ]\\), and the corresponding vector of infinitesimal displacements of actuated limbs denoted by \\(\Delta \bm{L} = [\Delta l\_1 \ \Delta l\_2 \ \Delta l\_3 \ \Delta l\_4 \ \Delta l\_5 \ \Delta l\_6 ]\\). +The relation between \\(\Delta \bm{L}\\) and \\(\bm{\tau}\\) is described by a diagonal \\(6 \times 6\\) matrix \\(\mathcal{K}\\): +\\[ \bm{\tau} = \mathcal{K} \cdot \Delta \bm{L} \\] + +Also, from the definition of the Jacobian, we have: +\\[ \Delta \bm{L} = \bm{J} \cdot \Delta \bm{\mathcal{X}} \\] +in which \\(\Delta \bm{\mathcal{X}} = [\Delta\_x \ \Delta\_y \ \Delta\_z \ \Delta \theta\_x \ \Delta \theta\_y \ \Delta \theta\_z ]\\) is the vector of infinitesimal linear and angular motions of the moving platform. 
+ +Also, the vector of the moving platform output wrench denoted by \\(\bm{F} = [f\_x \ f\_y \ f\_z \ n\_x \ n\_y \ n\_z ]\\) is related to the vector of actuated joint forces \\(\bm{\tau}\\) by: +\\[ \bm{F} = \bm{J}^T \cdot \bm{\tau} \\] + +By substitution, we obtain: +\\[ \bm{\mathcal{F}} = \bm{K} \cdot \Delta \bm{\mathcal{X}} \\] +in which +\\[ \bm{K} = \bm{J}^T \mathcal{K} \bm{J} \\] +where \\(K\\) is called the **stiffness matrix** of the Stewart-Gough manipulator.
+ +For a given configuration of the moving platform, the eigenvalue of the stiffness matrix represents the stiffness of the manipulator in the corresponding eigenvector direction. +Furthermore, the reciprocal of the stiffness matrix condition number may be used to represent the dexterity of the manipulator.
+ +The maximum stiffness of the manipulator can be analyzed by the maximum singular value of the Jacobian matrix. +The largest axis of the stiffness transformation hyper-ellipsoid is given by this value at each configuration. + + +## Dynamics {#dynamics} + + + + +### Introduction {#introduction} + +The dynamic analysis of parallel manipulators presents an inherent complexity due to their closed-loop structure. +Several approaches have been proposed.
+ +Traditional **Newton-Euler** formulation is used for dynamic analysis of general parallel manipulators. +In this formulation, the equation of motion of each limb and the moving platform must be derived, which inevitably leads to a large number of equations and less computational efficiency. +On the other hand, all the reaction forces can be computed, which is very useful in the design of a parallel manipulator.
+ +The **Lagrangian** formulation eliminates all the unwanted reaction forces at the outset, and therefore, is quite efficient. +However, because of the constraints imposed by the closed-loop structure, deriving explicit equations of motions in terms of a set of generalized coordinates becomes a prohibitive task.
+ +A third approach is to use the **principle of virtual work**, in which the computation of the constraint forces are bypassed. +In this method, inertial forces and moments are computed using linear and angular accelerations of the bodies. +Then, the whole manipulator is considered to be in static equilibrium by using the d'Alembert's principle, and the principle of virtual work is applied to derive the input forces and torques.
+ +Different objectives require different forms of formulations, there are three key issues pursued to derive dynamic formulation of parallel manipulators: + +1. **Calculation of internal forces** either active or passive for the design process of the manipulator +2. **Study on dynamical properties** of the manipulator for controller design +3. Utilization of dynamic specifications in an inverse dynamics controller or any **model-based control topology** + +The first item is the main advantage of the Newton-Euler formulation, the second and third items are the benefits of using the Lagrange or virtual work approaches.
+ +The dynamic equations in an **explicit form** can be written as: + +\begin{equation} + \tcmbox{\bm{M}(\bm{\mathcal{X}}) \ddot{\bm{\mathcal{X}}} + \bm{C}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}}) \dot{\bm{\mathcal{X}}} + \bm{G}(\bm{\mathcal{X}}) = \bm{\mathcal{F}}} +\end{equation} + +in which: + +- \\(\bm{\mathcal{X}}\\) is a vector of the generalized coordinates +- \\(\bm{M}(\bm{\mathcal{X}})\\) denotes the system **mass matrix** +- \\(\bm{C}(\bm{X},\dot{\bm{X}})\\) denotes the **Coriolis and centrifugal matrix** +- \\(\bm{G}(\bm{X})\\) denotes the **gravity vector** +- \\(\bm{\mathcal{F}}\\) denotes the generalized force + +Deriving explicit dynamic equations for parallel manipulators is a very prohibitive task because of the closed-loop nature of the manipulator. + + +### Dynamics of the Rigid Bodies {#dynamics-of-the-rigid-bodies} + + +#### Acceleration of Rigid Bodies {#acceleration-of-rigid-bodies} + +
+
+ +The acceleration analysis consists of studying the variations of linear velocity of a point and angular velocity of a rigid body with respect to time. + +
+
+Direct differentiation of these vectors with respect to time in a **fixed** frame leads to linear acceleration of a point and angular acceleration of a rigid body, respectively.
+Note that, to determine the absolute linear acceleration of a point, the derivative must be calculated relative to a fixed frame.
+In the study of robotic manipulators, usually **multiple moving frames** are defined to carefully determine the motion of the moving platform.
+Therefore, it is necessary to define the required arithmetic to transform the relative accelerations into absolute ones.
+
+
+##### Angular Acceleration of a Rigid Body {#angular-acceleration-of-a-rigid-body}
+
+To define angular acceleration of a rigid body, consider a moving frame \\(\\{\bm{B}\\}\\) attached to the rigid body, and the motion analyzed with respect to a fixed frame.
+Angular acceleration is an attribute of a rigid body and describes the variation of angular velocity of frame \\(\\{\bm{B}\\}\\) with respect to time.
+
+
+ +**Angular acceleration vector**, denoted by the symbol \\(\dot{\bm{\Omega}}\\), describes the instantaneous change of the angular velocity of frame \\(\\{\bm{B}\\}\\), denoted by \\(\bm{\Omega}\\), with respect to the fixed frame \\(\\{\bm{A}\\}\\): + +\begin{equation} + \begin{aligned} + \dot{\bm{\Omega}} = \frac{d \bm{\Omega}}{dt} &= \ddot{\theta} \hat{\bm{s}} + \dot{\theta} \dot{\hat{\bm{s}}} \\\\\\ + &= \ddot{\theta} \hat{\bm{s}} + \dot{\theta} (\bm{\Omega} \times \hat{\bm{s}}) \\\\\\ + &= \ddot{\theta} \hat{\bm{s}} + \end{aligned} +\end{equation} + +where \\(\\{\theta, \hat{\bm{s}}\\}\\) are the screw parameters representing the rotation of the rigid body. + +
+ +As shown by [eq:angular_acceleration](#eq:angular_acceleration), the angular acceleration of the rigid body is also along the screw axis \\(\hat{\bm{s}}\\) with a magnitude equal to \\(\ddot{\theta}\\). + + +##### Linear Acceleration of a Point {#linear-acceleration-of-a-point} + +Linear acceleration of a point \\(P\\) can be easily determined by time derivative of the velocity vector \\(\bm{v}\_P\\) of that point with respect to a fixed frame: + +\begin{equation} + \bm{a}\_p = \dot{\bm{v}}\_p = \left( \frac{d\bm{v}\_p}{dt} \right)\_\text{fix} +\end{equation} + +Note that this is correct only if the derivative is taken with respect to a **fixed** frame.
+ +Now consider the general motion of a rigid body, in which a moving frame \\(\\{\bm{B}\\}\\) is attached to the rigid body and the problem is to find the absolute acceleration of point \\(P\\) with respect to the fixed frame \\(\\{\bm{A}\\}\\). +The rigid body performs a general motion, which is a combination of a translation, denoted by the velocity vector \\({}^A\bm{v}\_{O\_B}\\), and an instantaneous angular rotation denoted by \\(\bm{\Omega}\\) (see Figure [fig:general_motion](#fig:general_motion)). +To determine acceleration of point \\(P\\), we start with the relation between absolute and relative velocities of point \\(P\\): + +\begin{equation} + {}^A\bm{v}\_P = {}^A\bm{v}\_{O\_B} + {}^A\bm{R}\_B{}^B\bm{v}\_P + {}^A\bm{\Omega}^\times {}^A\bm{R}\_B {}^B\bm{P} +\end{equation} + +In order to derive acceleration of point \\(P\\), we differentiate both sides with respect to time and we obtain + +\begin{equation} + \begin{split} + {}^A\bm{a}\_p\ &= {}^A\bm{a}\_{O\_B} \quad \text{(linear acc. of } \\{\bm{B}\\} \text{)} \\\\\\ + &+ {}^A\bm{R}\_B{}^B\bm{a}\_p \quad \text{(relative acc. of } P \text{ w.r.t. 
} \\{\bm{B}\\} \text{)} \\\\\\ + &+ {}^A\dot{\bm{\Omega}}^\times {}^A\bm{R}\_B {}^B\bm{P} \quad \text{(angular acceleration of } \\{\bm{B}\\} \text{)} \\\\\\ + &+ {}^A\bm{\Omega}^\times ({}^A\bm{\Omega}^\times {}^A\bm{R}\_B {}^B\bm{P}) \quad \text{(centrifugal)} \\\\\\ + &+ 2{}^A\bm{\Omega}^\times {}^A\bm{R}\_B {}^B\bm{v}\_P \quad \text{(Coriolis)} + \end{split} +\end{equation} + +For the case where \\(P\\) is a point embedded in the rigid body, \\({}^B\bm{v}\_P = 0\\) and \\({}^B\bm{a}\_P = 0\\) and we obtain: + +\begin{equation} + \begin{split} + {}^A\bm{a}\_P = {}^A\bm{a}\_{O\_B} &+ {}^A\dot{\bm{\Omega}}^\times {}^A\bm{R}\_B {}^B\bm{P} \\\\\\ + &+ {}^A\bm{\Omega}^\times ({}^A\bm{\Omega}^\times {}^A\bm{R}\_B {}^B\bm{P}) + \end{split} +\end{equation} + + +#### Mass Properties {#mass-properties} + +In this section, the properties of mass, namely **center of mass**, **moments of inertia** and its characteristics and the required transformations are described. + + + +{{< figure src="/ox-hugo/taghirad13_mass_property_rigid_body.png" caption="Figure 10: Mass properties of a rigid body" >}} + + +##### Center of Mass {#center-of-mass} + +Consider a reference frame \\(\\{\bm{A}\\}\\) in which the mass distribution of a material body is measured, and let \\(\bm{p}\\) denote the position vector of a differential mass \\(\rho dV\\) with respect to a reference frame. + +The **center of mass** of a rigid body is defined as the point \\(C\\) which satisfied the following condition + +\begin{equation} + \bm{p}\_c = \frac{1}{m} \int\_V \bm{p} \rho dV +\end{equation} + +in which the mass of the material body \\(\\{\bm{B}\\}\\) with density \\(\rho\\) and volume \\(V\\) is defined as + +\begin{equation} + m = \int\_V \rho dV +\end{equation} + + +##### Moments of Inertia {#moments-of-inertia} + +As opposed to the mass, which introduces inertia to linear accelerations, moment of inertia is the property of mass which introduces **inertia to angular accelerations**. 
+Basically, for rotational motion, **the distribution of mass with respect to the axis of rotation introduces resistance to the angular acceleration**.
+ +Moments of inertia \\(\bm{I}\\) about \\(\bm{A}\\) is defined by the second moment of the mass with respect to a reference frame of rotation as: + +\begin{equation} + {}^A\bm{I} = \begin{bmatrix} + I\_{XX} & I\_{XY} & I\_{XZ} \\\\\\ + I\_{YX} & I\_{YY} & I\_{YZ} \\\\\\ + I\_{ZX} & I\_{ZY} & I\_{ZZ} + \end{bmatrix} +\end{equation} + +in which + +\begin{equation\*} + \begin{aligned} + I\_{XX} &= \int\_V (y^2 + z^2) \rho dV, \quad I\_{XY} = I\_{YX} = -\int\_V xy \rho dV \\\\\\ + I\_{YY} &= \int\_V (x^2 + z^2) \rho dV, \quad I\_{YZ} = I\_{ZY} = -\int\_V yz \rho dV \\\\\\ + I\_{ZZ} &= \int\_V (x^2 + y^2) \rho dV, \quad I\_{XZ} = I\_{ZX} = -\int\_V xz \rho dV + \end{aligned} +\end{equation\*} + + +##### Principal Axes {#principal-axes} + +As seen in equation [eq:moment_inertia](#eq:moment_inertia), the inertia matrix elements are a function of mass distribution of the rigid body with respect to the frame \\(\\{\bm{A}\\}\\). +Hence, it is possible to find **orientations of frame** \\(\\{\bm{A}\\}\\) in which the product of inertia terms vanish and inertia matrix becomes **diagonal**: + +\begin{equation} + {}^A\bm{I} = \begin{bmatrix} + I\_{XX} & 0 & 0 \\\\\\ + 0 & I\_{YY} & 0 \\\\\\ + 0 & 0 & I\_{ZZ} + \end{bmatrix} +\end{equation} + +Such axes are called the **principal axes of inertia**, and diagonal terms are called the **principal moments of inertia**, which represent the maximum, minimum and intermediate values of the moments of inertia for a particular chosen origin \\(\\{\bm{A}\\}\\). + +It can be shown that the principal moments of inertial and principal axes are invariant parameters and can be determined from an eigen value decomposition of the inertia matrix in any configuration of the reference frame \\(\\{\bm{A}\\}\\). + + +##### Inertia Matrix Transformations {#inertia-matrix-transformations} + +The moment of inertia is usually given for **frames passing through the center of mass of the rigid body**. 
+**The inertia matrix changes under change of the reference frame**.
+ +Consider frame \\(\\{\bm{C}\\}\\) **parallel** to \\(\\{\bm{A}\\}\\) and attached to the center of mass of a rigid body and let \\(\bm{p}\_c = [x\_c, y\_c, z\_c]^T\\) denote the vector of the position of the center of mass with respect to frame \\(\\{\bm{A}\\}\\). +The relation between the inertia matrix about \\(A\\) and that about \\(C\\) is given by the following relation: + +\begin{equation} + \tcmbox{{}^A\bm{I} = {}^C\bm{I} + m(\bm{p}\_c^T \bm{p}\_c \bm{I}\_{3 \times 3} - \bm{p}\_c \bm{p}\_c^T)} +\end{equation} + +in which \\(m\\) denotes the mass of the rigid body and \\(\bm{I}\_{3 \times 3}\\) denotes the identity matrix.
+ +On the other hand, if the reference frame \\(\\{B\\}\\) has **pure rotation** with respect to the frame attached to the center of mass \\(\\{A\\}\\): + +\begin{equation} + {}^A\bm{I} = {}^A\bm{R}\_C {}^C\bm{I} {}^A\bm{R}\_C^T +\end{equation} + + +#### Momentum and Kinetic Energy {#momentum-and-kinetic-energy} + + +##### Linear Momentum {#linear-momentum} + +Linear momentum of a material body, shown in Figure [fig:angular_momentum_rigid_body](#fig:angular_momentum_rigid_body), with respect to a reference frame \\(\\{\bm{A}\\}\\) is defined as + +\begin{equation} + {}^A\bm{G} = \int\_V \frac{d\bm{p}}{dt} \rho dV +\end{equation} + +For any mass element \\(\rho dV\\), the position vector \\(\bm{p}\\) can be written as +\\[ p = p\_c + r \\] + +And because \\(\int\_V r \rho dV = 0\\), we have by substitution +\\[ {}^A\bm{G} = \frac{d\bm{p}\_c}{dt} \int\_V \rho dV \\] +and thus + +\begin{equation} + \tcmbox{{}^A\bm{G} = m \cdot {}^A\bm{v}\_C} +\end{equation} + +in which \\({}^A\bm{v}\_C\\) denotes the velocity of the center of mass with respect to the frame \\(\\{\bm{A}\\}\\).
+
+This result implies that the **total linear momentum** of differential masses is equal to the linear momentum of a **point mass** \\(m\\) located at the **center of mass**.
+This highlights the importance of the center of mass in dynamic formulation of rigid bodies.
+
+
+
+{{< figure src="/ox-hugo/taghirad13_angular_momentum_rigid_body.png" caption="Figure 11: The components of the angular momentum of a rigid body about \\(A\\)" >}}
+
+
+##### Angular Momentum {#angular-momentum}
+
+Consider the solid body represented in Figure [fig:angular_momentum_rigid_body](#fig:angular_momentum_rigid_body).
+Angular momentum of the differential masses \\(\rho dV\\) about a reference point \\(A\\), expressed in the reference frame \\(\\{\bm{A}\\}\\) is defined as
+\\[ {}^A\bm{H} = \int\_V \left(\bm{p} \times \frac{d\bm{p}}{dt} \right) \rho dV \\]
+in which \\(d\bm{p}/dt\\) denotes the velocity of differential mass with respect to the reference frame \\(\\{\bm{A}\\}\\).
+
+By substituting \\(\bm{p} = \bm{p}\_c + \bm{r}\\) in the previous equations, we obtain:
+\\[ {}^A\bm{H} = \bm{p}\_c \times m \bm{v}\_c + \int\_V \bm{r} \times (\bm{\Omega} \times \bm{r}) \rho dV \\]
+
+Therefore, angular momentum of the rigid body about point \\(A\\) is reduced to
+
+\begin{equation}
+ \tcmbox{{}^A\bm{H} = \bm{p}\_c \times \bm{G}\_c + {}^C\bm{H}}
+\end{equation}
+
+in which
+\\[ {}^C\bm{H} = \int\_V \bm{r} \times (\bm{\Omega} \times \bm{r}) \rho dV = {}^C\bm{I} \cdot \bm{\Omega} \\]
+
+Equation [eq:angular_momentum](#eq:angular_momentum) reveals that angular momentum of a rigid body about a point \\(A\\) can be written as \\(\bm{p}\_c \times \bm{G}\_c\\), which is the contribution of linear momentum of the rigid body about point \\(A\\), and \\({}^C\bm{H}\\) which is the angular momentum of the rigid body about the center of mass.
+
+This also highlights the importance of the center of mass in the dynamic analysis of rigid bodies.
+If the center of mass is taken as the reference point, the relation describing angular momentum [eq:angular_momentum](#eq:angular_momentum) is very analogous to that of linear momentum [eq:linear_momentum](#eq:linear_momentum).
+
+
+##### Kinetic Energy {#kinetic-energy}
+
+The Kinetic energy of a rigid body is defined as
+
+\begin{equation}
+  \tcmbox{\bm{K} = \frac{1}{2} \int\_V \bm{v} \cdot \bm{v} \rho dV}
+\end{equation}
+
+The velocity of a differential mass \\(\rho dV\\) can be represented by linear velocity of the center of mass and angular velocity of the rigid body as
+\\[ \bm{v} = \bm{v}\_c + \bm{\Omega} \times \bm{r} \\]
+
+By substitution, the kinetic energy of the rigid body may be obtained by:
+
+\begin{equation}
+  \tcmbox{\bm{K} = \frac{1}{2} \bm{v}\_c \cdot \bm{G}\_c + \frac{1}{2} \bm{\Omega} \cdot {}^C\bm{H}}
+\end{equation}
+
+in which \\(\bm{G}\_C\\) is the linear momentum of the rigid body and \\({}^C\bm{H}\\) is the angular momentum of the rigid body about the center of mass.
+
+This equation reveals that kinetic energy of a moving body can be represented as the **kinetic energy of a point mass located at the center of mass**, in addition to the **kinetic energy of a body rotating about the center of mass**.
+
+
+#### Newton-Euler Laws {#newton-euler-laws}
+
+The Newton and Euler laws can be written for three different cases where the angular motion:
+
+1. is about a fixed point in space
+2. is represented about the center of mass
+3. is represented about an arbitrary moving point in space
+
+We only examine the case in which all rotations are represented about the center of mass.
+ +Consider a rigid body under general motion, that is, a combination of translation and rotation. + +
+
+ +The Newton's law relates the change of linear momentum of the rigid body to the resulting external forces applied to it +\\[ \sum \bm{f}\_\text{ext} = \frac{d\bm{G}\_c}{dt} \\] + +
+ +For the case of a constant mass rigid body, this law is reduced to +\\[ \sum \bm{f}\_\text{ext} = m \frac{d\bm{v}\_c}{dt} = m \bm{a}\_c \\] +in which \\(\bm{a}\_c\\) is the linear acceleration of the center of mass.
+ +
+
+ +The Euler's law relates the change of angular momentum of a rigid body about the center of mass, to the summation of all external moments applied to the rigid body about center of mass +\\[ \sum {}^c\bm{n}\_\text{ext} = \frac{d}{dt}({}^c\bm{H}) \\] + +
+ +For the case of a constant mass rigid body, this law is reduced to +\\[ \sum {}^c\bm{n}\_\text{ext} = \frac{d}{dt}({}^c\bm{I} \bm{\Omega}) = {}^c\bm{I} \dot{\bm{\Omega}} + \bm{\Omega} \times ({}^c\bm{I} \bm{\Omega}) \\] +in which \\(\sum {}^c\bm{n}\_\text{ext}\\) is the summation of all external moments applied to the rigid body about the center of mass, \\({}^c\bm{I}\\) is the moment of inertia about the center of mass, and \\(\bm{\Omega}\\) is the angular velocity of the rigid body. + + +### Newton-Euler Formulation {#newton-euler-formulation} + +The most popular approach used in robotics to derive the dynamic equation of motion of a parallel manipulator is the **Newton-Euler formulation**. + +In the Newton-Euler formulation, the **free-body diagrams** of all the limbs and moving platform are considered and the **Newton Euler laws are applied to each isolated body**. +To apply the laws to each body, it is necessary to derive linear acceleration of links, center of mass, as well as angular acceleration of the links. +Hence, **acceleration analysis** would be performed on all the links of the manipulator and the moving platform.
+ +Furthermore, all the external forces and moments applied to the links and to the moving platform must be carefully determined. +Gravitational forces acting on the center of masses, frictional forces and moments acting on the joints, and any possible disturbance force or moment applied to the links and to the moving platform would be identified. +The most important external forces or moments applied on the manipulator are the one applied by the actuators, denoted by \\(\bm{\tau} = [\tau\_{1}, \tau\_{2}, \dots, \tau\_{m}]^{T}\\). +The forces and moments shall be derived from the set of Newton-Euler laws, which are written separately for each link and the moving platform.
+ +Finally, by elimination of these constraints forces and moments on the Newton-Euler equations written for the moving platform, the dynamic equations **relating the actuator forces and moments** \\(\bm{\tau}\\) to the **motion variables of the moving platform** \\(\bm{\mathcal{X}}\\), \\(\dot{\bm{\mathcal{X}}}\\) and \\(\ddot{\bm{\mathcal{X}}}\\) are derived. + + +#### Dynamic Formulation of the Stewart-Gough Platform {#dynamic-formulation-of-the-stewart-gough-platform} + + +##### Acceleration Analysis {#acceleration-analysis} + +In acceleration analysis, it is intended to **derive expressions for linear and angular acceleration of the limbs**, namely \\(\ddot{l}\_{i}\\) and \\(\dot{\bm{\omega}}\_{i}\\) as a function of the moving platform acceleration \\(\ddot{\bm{\mathcal{X}}} = [\dot{\bm{v}}\_{p}, \dot{\bm{\omega}}]^{T}\\). +To obtain such a relation, let us rewrite the **velocity loop closure**: + +\begin{equation} + \bm{v}\_{p} + \bm{\omega} \times \bm{b}\_{i} = \dot{l}\_{i} \hat{\bm{s}}\_{i} + l\_{i}(\bm{\omega}\_{i} \times \hat{\bm{s}}\_{i}) +\end{equation} + +Since there is no actuation torque about \\(\hat{\bm{s}}\_{i}\\), the limb angular velocity and acceleration vectors (\\(\bm{\omega}\_i\\) and \\(\dot{\bm{\omega}}\_i\\)) are normal to \\(\hat{\bm{s}}\_{i}\\) provided that the following assumption are considered for the platform: + +- both end joints of the limb are spherical +- the limbs are symmetric with respect to their axes +- the effects of friction in spherical joints are neglected + +Considering these assumptions, it can be concluded that the limbs cannot spin about their axes: \\(\bm{\omega}\_{i} \cdot \hat{\bm{s}}\_{i} = 0\\) and \\((\hat{\bm{s}}\_i \times (\bm{\omega}\_i \times \hat{\bm{s}}\_i)) = \bm{\omega}\_i\\). 
+ +To obtain the angular velocity of the limbs \\(\bm{\omega}\_{i}\\), we cross multiply \\(\hat{\bm{s}}\_{i}\\) to both sides of the previous equation: + +\begin{equation} + \bm{\omega}\_{i} = \frac{1}{l\_i} ( \hat{\bm{s}}\_i \times \bm{v}\_{b\_i} ) +\end{equation} + +With \\(\bm{v}\_{b\_{i}}\\) an **intermediate variable** corresponding to the velocity of point \\(\bm{b}\_{i}\\): + +\begin{equation} + \bm{v}\_{b\_{i}} = \bm{v}\_{p} + \bm{\omega} \times \bm{b}\_{i} +\end{equation} + +As illustrated in Figure [fig:free_body_diagram_stewart](#fig:free_body_diagram_stewart), the piston-cylinder structure of the limbs is decomposed into two separate parts, the masses of which are denoted by \\(m\_{i\_1}\\) and \\(m\_{i\_2}\\). +The position vector of these two center of masses can be determined by the following equations: + +\begin{align} + \bm{p}\_{i\_1} &= \bm{a}\_{i} + c\_{i\_1} \hat{\bm{s}}\_{i} \\\\\\ + \bm{p}\_{i\_2} &= \bm{a}\_{i} + ( l\_i - c\_{i\_2}) \hat{\bm{s}}\_{i} +\end{align} + + + +{{< figure src="/ox-hugo/taghirad13_free_body_diagram_stewart.png" caption="Figure 12: Free-body diagram of the limbs and the moving platform of a general Stewart-Gough manipulator" >}} + +By differentiating the previous equations and doing some manipulations, we obtain: + +\begin{align} + \ddot{l}\_i &= \bm{a}\_{b\_i} \times \hat{\bm{s}}\_i + l\_i (\bm{\omega\_i} \cdot \bm{\omega\_i}) \\\\\\ + \dot{\bm{\omega}}\_i &= \frac{1}{l\_i} (\hat{\bm{s}}\_i \times \bm{a}\_{b\_i} - 2 \dot{l}\_i \bm{\omega}\_i) \\\\\\ + \bm{a}\_{i\_1} &= c\_{i\_1} ( \dot{\bm{\omega}}\_i \times \hat{\bm{s}}\_{i} + \bm{\omega}\_i \times (\bm{\omega}\_i \times \hat{\bm{s}}\_i)) \\\\\\ + \bm{a}\_{i\_2} &= ( l\_i - c\_{i\_2}) (\dot{\bm{\omega}}\_i \times \hat{\bm{s}}\_{i} - (\bm{\omega}\_i \cdot \bm{\omega}\_i) \hat{\bm{s}}\_i) + 2 \dot{l}\_i (\bm{\omega}\_i \times \hat{\bm{s}}\_i) + \ddot{l}\_i \hat{\bm{s}}\_i +\end{align} + +with + +\begin{equation} + \bm{a}\_{b\_i} = \bm{a}\_p + \dot{\bm{\omega}} \times 
\bm{b}\_i + \bm{\omega} \times (\bm{\omega} \times \bm{b}\_i)
+\end{equation}
+
+
+##### Dynamic Formulation of the Limbs {#dynamic-formulation-of-the-limbs}
+
+To derive the dynamic formulation of the Stewart-Gough platform, the manipulator is decomposed into a moving platform and six identical limbs.
+We assume that each limb consists of two parts, the cylinder and the piston, where the velocities and the accelerations of their centers of masses are determined.
+We also assume that the centers of masses of the cylinder and the piston are located at a distance of \\(c\_{i\_1}\\) and \\(c\_{i\_2}\\) above their foot points, and their masses are denoted by \\(m\_{i\_1}\\) and \\(m\_{i\_2}\\).
+Moreover, consider that the pistons are symmetric about their axes, and their centers of masses lie at their midlengths.
+
+The free-body diagrams of the limbs and the moving platform are given in Figure [fig:free_body_diagram_stewart](#fig:free_body_diagram_stewart).
+The reaction forces at fixed points \\(A\_i\\) are denoted by \\(\bm{f}\_{a\_i}\\), the internal force at moving points \\(B\_i\\) are denoted by \\(\bm{f}\_{b\_i}\\), and the internal forces and moments between cylinders and pistons are denoted by \\(\bm{f}\_{c\_i}\\) and \\(\bm{M\_{c\_i}}\\) respectively.
+
+Assume that the only existing external disturbance wrench is applied on the moving platform and is denoted by \\(\bm{\mathcal{F}}\_d = [\bm{F}\_d, \bm{n}\_d]^T\\).
+
+
+ +\begin{equation} +\begin{aligned} + \bm{f}\_{b\_i} &= \frac{1}{l\_i}(I\_{xx\_i} + l\_i^2 m\_{c\_e}) \hat{\bm{s}}\_i \times \dot{\bm{\omega}}\_i \\\\\\ + &\quad + \frac{2}{l\_i} m\_{i\_2} c\_{i\_2} \dot{l}\_i \hat{\bm{s}}\_i \times \bm{\omega}\_i \\\\\\ + &\quad - m\_{g\_e} \hat{\bm{s}}\_i \times (\hat{\bm{s}}\_i \times \bm{g}) +\end{aligned} +\end{equation} + +in which \\(m\_{c\_e}\\) is defined as + +\begin{equation} + m\_{c\_e} = \frac{1}{l\_i^2} \left( m\_{i\_1} c\_{i\_1}^2 + m\_{i\_2} c\_{i\_2}^2 \right) +\end{equation} + +
+ + +##### Dynamic Formulation of the Moving Platform {#dynamic-formulation-of-the-moving-platform} + +Assume that the **moving platform center of mass is located at the center point** \\(P\\) and it has a mass \\(m\\) and moment of inertia \\({}^A\bm{I}\_{P}\\). +Furthermore, consider that gravitational force and external disturbance wrench are applied on the moving platform, \\(\bm{\mathcal{F}}\_d = [\bm{F}\_d, \bm{n}\_d]^T\\) as depicted in Figure [fig:free_body_diagram_stewart](#fig:free_body_diagram_stewart). + +The Newton-Euler formulation of the moving platform is as follows: + +\begin{align} + \sum \bm{F}\_{\text{ext}} &= \sum\_{i=1}^6 \bm{f}\_{b\_i} + m \bm{g} + \bm{F}\_{d} = m \bm{a}\_p \\\\\\ + \sum {}^p\bm{n}\_{\text{ext}} &= \bm{n}\_d + \sum\_{i=1}^6 \bm{b}\_i \times \bm{f}\_{b\_i} \nonumber \\\\\\ + & = {}^{A}\bm{I}\_{P} \dot{\bm{\omega}} + \bm{\omega} \times {}^{A}\bm{I}\_{P} \bm{\omega} +\end{align} + +in which \\({}^A\bm{I}\_P\\) is considered in the fixed frame \\(\\{\bm{A}\\}\\) and can be calculated by: + +\begin{equation} + {}^A\bm{I}\_P = {}^A\bm{R}\_B {}^B\bm{I}\_P {}^A\bm{R}\_B^T +\end{equation} + +These equations can be rewritten in an implicit form as + +
+
+ +\begin{align} + & m(\bm{a}\_p - \bm{g}) - \bm{F}\_d - \sum\_{i=1}^{6} \bm{f}\_{b\_i} = \bm{0} \label{eq:dyn\_form\_implicit\_trans}\\\\\\ + & {}^A\bm{I}\_P \dot{\bm{\omega}} + \bm{\omega} \times {}^A\bm{I}\_P\bm{\omega} - \bm{n}\_d - \sum\_{i=1}^{6} \bm{b}\_{i} \times \bm{f}\_{b\_i} = \bm{0} \label{eq:dyn\_form\_implicit\_rot} +\end{align} + +
+ +These two equations are the governing dynamic formulation of the Stewart-Gough platform, in which \\(\bm{\mathcal{F}}\_d = [\bm{F}\_{d}, \bm{n}\_{d}]^T\\) denotes the disturbance wrench exerted on the moving plateform. + +They can be viewed in an implicit vector form of + +\begin{equation} + \bm{f}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}}, \ddot{\bm{\mathcal{X}}}, \bm{\mathcal{F}}\_d, \bm{\tau}) = \bm{0} +\end{equation} + +in which \\(\bm{\mathcal{X}} = [\bm{x}\_P, \bm{\theta}]^T\\) is the motion variable of the moving platform consisting of the linear position of point \\(P\\) and the moving platform orientation represented by screw coordinates. + + +#### Closed-Form Dynamics {#closed-form-dynamics} + +While dynamic formulation in the form of Equation [eq:dynamic_formulation_implicit](#eq:dynamic_formulation_implicit) can be used to simulate inverse dynamics of the Stewart-Gough platform, its implicit nature makes it unpleasant for the dynamic analysis and control. + + +##### Closed-Form Dynamics of the Limbs {#closed-form-dynamics-of-the-limbs} + +To derive a closed-form dynamic formulation for the Stewart-Gough platform as + +\begin{equation} + \bm{M}(\bm{\mathcal{X}})\ddot{\bm{\mathcal{X}}} + \bm{C}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}})\dot{\bm{\mathcal{X}}} + \bm{G}(\bm{\mathcal{X}}) = \bm{\mathcal{F}} +\end{equation} + +first consider an intermediate generalized coordinate \\(x\_i\\), which is in fact the position of point \\(b\_i\\). +This generalized coordinate is used to harmonize the limb and the moving platform dynamic formulation and to **derive an closed-form structure for the whole manipulator**. + +Now, manipulate each limb dynamic equations to convert them into the closed form. +Let us first introduce some relations to substitute kinematic parameters like \\(\dot{\bm{\omega}}\_i\\), \\(\ddot{\bm{\omega}}\_i\\), \\(\ddot{l}\_i\\) with the intermediate generalized coordinate \\(x\_i\\) and its time derivatives. 
+ +After some manipulations, we obtain the following closed form equation: + +\begin{equation} + \bm{M}\_i \ddot{\bm{x}}\_i + \bm{C}\_i \dot{\bm{x}}\_i + \bm{G}\_i = \bm{F}\_i +\end{equation} + +the corresponding mass matrix \\(\bm{M}\_i\\), the Coriolis matrix \\(\bm{C}\_i\\), and the gravity vector \\(\bm{G}\_i\\) can be simplified into the following form: + +\begin{align} + \bm{M}\_i &= m\_{i\_2}\hat{\bm{s}}\_i \hat{\bm{s}}\_i^T - \frac{1}{l\_i^2} I\_{xx\_i} \hat{\bm{s}}\_{i \times}^2 \\\\\\ + \bm{C}\_i &= -\frac{2}{l\_i} m\_{c\_o}\dot{l}\_i \hat{\bm{s}}\_{i\times}^2 - \frac{1}{l\_i^2} m\_{i\_2} c\_{i\_2} \hat{\bm{s}}\_i \dot{\bm{x}}\_i^T \hat{\bm{s}}\_{i \times}^2 \\\\\\ + \bm{G}\_i &= \big( m\_{g\_e} \hat{\bm{s}}\_{i\times}^2 - m\_{i\_2} \hat{\bm{s}}\_i \hat{\bm{s}}\_i^T \big) \bm{g} \\\\\\ + \bm{F}\_i &= -\bm{f}\_{b\_i} + \tau\_i \hat{\bm{s}}\_i +\end{align} + +in which + +\begin{align} + m\_{c\_e} &= \frac{1}{l\_i^2} (m\_{i\_1} c\_{i\_1}^2 + m\_{i\_2} c\_{i\_2}^2) \\\\\\ + m\_{c\_o} &= \frac{1}{l\_i} m\_{i\_2} c\_{i\_2} - \frac{1}{l\_i^2} (I\_{xx\_i} + l\_i^2 m\_{c\_e}) \\\\\\ + m\_{g\_e} &= \frac{1}{l\_i} (m\_{i\_1} c\_{i\_1} + m\_{i\_2}(l\_i - c\_{i\_2})) +\end{align} + + +##### Closed-Form Dynamics of the Moving Platform {#closed-form-dynamics-of-the-moving-platform} + +In this section, the dynamic equations of the moving platform are transformed in the following closed-form formulation + +\begin{equation} + \bm{M}\_{p}\ddot{\bm{\mathcal{X}}} + \bm{C}\_{p}\dot{\bm{\mathcal{X}}} + \bm{G}\_{p} = \bm{\mathcal{F}}\_{p} +\end{equation} + +in which \\(\bm{\mathcal{X}}\\) consists of six coordinates: the first three \\(\bm{x}\_p\\) represent linear motion of the moving platform, and the last three \\(\bm{\theta}\\) its angular motion. 
+It is preferable to use the **screw coordinates** for representing the angular motion **as its derivative is also a vector representing angular velocity**: + +\begin{equation} + \tcmbox{\bm{\mathcal{X}} = \begin{bmatrix}\bm{x}\_p \\ \bm{\theta}\end{bmatrix}; \quad + \dot{\bm{\mathcal{X}}} = \begin{bmatrix}\bm{v}\_p \\ \bm{\omega}\end{bmatrix}; \quad + \ddot{\bm{\mathcal{X}}} = \begin{bmatrix}\bm{a}\_p \\ \dot{\bm{\omega}}\end{bmatrix}} +\end{equation} + +Equations [eq:dyn_form_implicit_trans](#eq:dyn_form_implicit_trans) and [eq:dyn_form_implicit_rot](#eq:dyn_form_implicit_rot) can be simply converted into a closed form of Equation [eq:close_form_dynamics_platform](#eq:close_form_dynamics_platform) with the following terms: + +\begin{equation} + \begin{aligned} + &\bm{M}\_p = \begin{bmatrix} m\bm{I}\_{3 \times 3} & \bm{O}\_{3\times 3} \\ \bm{O}\_{3\times 3} & {}^A \bm{I}\_p \end{bmatrix}\_{6\times 6}; \bm{C}\_p = \begin{bmatrix} \bm{O}\_{3\times 3} & \bm{O}\_{3\times 3} \\ \bm{O}\_{3\times 3} & \omega\_{\times} {}^A\bm{I}\_p \end{bmatrix}\_{6 \times 6} \\\\\\ + &\bm{G}\_p = \begin{bmatrix}-m\bm{g} \\ \bm{O}\_{3\times 1}\end{bmatrix}\_{6 \times 1}; \bm{\mathcal{F}}\_p = \begin{bmatrix} \bm{F}\_d + \sum \bm{f}\_{b\_i} \\ \bm{n}\_d + \sum \bm{b}\_{i \times} \bm{f}\_{b\_i} \end{bmatrix}\_{6\times 1} + \end{aligned} +\end{equation} + + +##### Closed-Form Dynamics of the Stewart-Gough Manipulator {#closed-form-dynamics-of-the-stewart-gough-manipulator} + +To derive the closed-form dynamic formulation for the whole manipulator, a transformation is required to map the intermediate generalized coordinates \\(x\_i\\) into the principal generalized coordinates \\(\bm{\mathcal{X}}\\). + +Using such a transformation, and by adding the resulting equations of the limbs and the moving platform, the internal forces \\(\bm{f}\_{b\_i}\\) can be eliminated, and closed-form dynamic formulation for the whole manipulator can be derived. 
+
+To generate such a transformation define a Jacobian matrix \\(\bm{J}\_i\\) relating the intermediate coordinates to that of the principal generalized coordinate:
+
+\begin{equation}
+  \dot{\bm{x}}\_i = \bm{J}\_i \dot{\bm{\mathcal{X}}}
+\end{equation}
+
+in which
+
+\begin{equation}
+  \bm{J}\_i = \begin{bmatrix} \bm{I}\_{3 \times 3} & -\bm{b}\_{i \times} \end{bmatrix}
+\end{equation}
+
+\begin{equation}
+  \bm{M}\_{li} \ddot{\bm{x}}\_i + \bm{C}\_{li} \dot{\bm{x}}\_i + \bm{G}\_{li} = \bm{\mathcal{F}}\_{li}
+\end{equation}
+
+in which
+
+\begin{equation}
+  \begin{aligned}
+    \bm{M}\_{li} = \bm{J}\_i^T \bm{M}\_i \bm{J}\_i; \quad & \bm{C}\_{li} = \bm{J}\_i^T \bm{M}\_i \dot{\bm{J}}\_i + \bm{J}\_i^T \bm{C}\_i \bm{J}\_i \\\\\\
+    \bm{G}\_{li} = \bm{J}\_i^T \bm{G}\_i; \quad & \bm{\mathcal{F}}\_{li} = \bm{J}\_i^T \bm{F}\_i
+  \end{aligned}
+\end{equation}
+
+
+ +\begin{equation} + \begin{aligned} + \bm{M}(\bm{\mathcal{X}}) &= \bm{M}\_p + \sum\_{i=1}^6 \bm{M}\_{li} \\\\\\ + \bm{C}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}}) &= \bm{C}\_p + \sum\_{i=1}^6 \bm{C}\_{li} \\\\\\ + \bm{G}(\bm{\mathcal{X}}) &= \bm{G}\_p + \sum\_{i=1}^6 \bm{G}\_{li} \\\\\\ + \bm{\mathcal{F}}(\bm{\mathcal{X}}) &= \bm{\mathcal{F}}\_d + \sum\_{i=1}^6 \bm{\mathcal{F}}\_{\tau\_i} + \end{aligned} +\end{equation} + +
+ + +##### Forward Dynamics Simulations {#forward-dynamics-simulations} + +As shown in Figure [fig:stewart_forward_dynamics](#fig:stewart_forward_dynamics), it is **assumed that actuator forces and external disturbance wrench applied to the manipulator are given and the resulting trajectory of the moving platform is to be determined**. + + + +{{< figure src="/ox-hugo/taghirad13_stewart_forward_dynamics.png" caption="Figure 13: Flowchart of forward dynamics implementation sequence" >}} + +The closed-form dynamic formulation of the Stewart-Gough platform corresponds to the set of equations given in [eq:closed_form_dynamic_stewart_wanted](#eq:closed_form_dynamic_stewart_wanted), whose terms are given in [eq:close_form_dynamics_stewart_terms](#eq:close_form_dynamics_stewart_terms). + + +##### Inverse Dynamics Simulation {#inverse-dynamics-simulation} + +In inverse dynamics simulations, it is assumed that the **trajectory of the manipulator is given**, and the **actuator forces required to generate such trajectories are to be determined**. + +As illustrated in Figure [fig:stewart_inverse_dynamics](#fig:stewart_inverse_dynamics), inverse dynamic formulation is implemented in the following sequence. +The first step is trajectory generation for the manipulator moving platform. +Many different algorithms are developed for a smooth trajectory generation. +For such a trajectory, \\(\bm{\mathcal{X}}\_{d}(t)\\) and the time derivatives \\(\dot{\bm{\mathcal{X}}}\_{d}(t)\\), \\(\ddot{\bm{\mathcal{X}}}\_{d}(t)\\) are known. + +The next step is to solve the inverse kinematics of the manipulator and to find the limbs' linear and angular positions, velocity and acceleration as a function of the manipulator trajectory. +The manipulator Jacobian matrix \\(\bm{J}\\) is also calculated in this step. 
+ +Next, the dynamic matrices given in the closed-form formulations of the limbs and the moving platform are calculated using equations [eq:closed_form_intermediate_parameters](#eq:closed_form_intermediate_parameters) and [eq:close_form_dynamics_stewart_terms](#eq:close_form_dynamics_stewart_terms), respectively.
+
+To combine the corresponding matrices, and to generate the whole manipulator dynamics, it is necessary to find intermediate Jacobian matrices \\(\bm{J}\_i\\), given in [eq:jacobian_intermediate](#eq:jacobian_intermediate), and then compute compatible matrices for the limbs given in [eq:closed_form_stewart_manipulator](#eq:closed_form_stewart_manipulator).
+Now that all the terms required to **compute the actuator forces required to generate such a trajectory** are computed, let us define \\(\bm{\mathcal{F}}\\) as the resulting Cartesian wrench applied to the moving platform.
+This wrench can be calculated from the summation of all inertial and external forces **excluding the actuator torques** \\(\bm{\tau}\\) in the closed-form dynamic formulation [eq:closed_form_dynamic_stewart_wanted](#eq:closed_form_dynamic_stewart_wanted).
+
+By this definition, \\(\bm{\mathcal{F}}\\) can be viewed as the projector of the actuator forces acting on the manipulator, mapped to the Cartesian space.
+Since there is no redundancy in actuation in the Stewart-Gough manipulator, the Jacobian matrix \\(\bm{J}\\) is square, and actuator forces can be uniquely determined from this wrench, by \\(\bm{\tau} = \bm{J}^{-T} \bm{\mathcal{F}}\\), provided \\(\bm{J}\\) is non-singular.
+Therefore, actuator forces \\(\bm{\tau}\\) are computed in the simulation from + +\begin{equation} + \bm{\tau} = \bm{J}^{-T} \left( \bm{M}(\bm{\mathcal{X}})\ddot{\bm{\mathcal{X}}} + \bm{C}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}})\dot{\bm{\mathcal{X}}} + \bm{G}(\bm{\mathcal{X}}) - \bm{\mathcal{F}}\_d \right) +\end{equation} + + + +{{< figure src="/ox-hugo/taghirad13_stewart_inverse_dynamics.png" caption="Figure 14: Flowchart of inverse dynamics implementation sequence" >}} + + +### Virtual Work Formulation {#virtual-work-formulation} + + +### Lagrange Formulation {#lagrange-formulation} + +\begin{equation} + K(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}}) = \frac{1}{2} \dot{\bm{\mathcal{X}}}^T \bm{M}(\bm{\mathcal{X}}) \dot{\bm{\mathcal{X}}} +\end{equation} + +\begin{equation} + \bm{G}(\bm{\mathcal{X}}) = \frac{\partial P(\bm{\mathcal{X}})}{\partial \bm{\mathcal{X}}} +\end{equation} + +\begin{equation} + \bm{C}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}}) = \frac{1}{2} (\dot{\bm{M}} + \bm{U}^T - \bm{U}) +\end{equation} + + +## Motion Control {#motion-control} + + + + +### Introduction {#introduction} + +Parallel robots are designed for two different types of applications.
+ +In the first type, the moving platform of the robot accurately follows a **desired position and orientation** path in a specific time frame, while **no interacting forces** need to be applied to the environment.
+ +The second type of application include situations where the robot moving platform is in **contact with a stiff environment** (e.g. precision machining). +In such application, the contact force describe the state of interaction more effectively than the position and orientation of the moving platform. +The problem of **force control** can be described as to derive the actuator forces for such a manipulator required to generate a prescribed desired wrench (force and torque) at the manipulator moving platform, while the manipulator is performing its motion.
+
+Although a multiple degrees-of-freedom robotic manipulator can usually be represented by a MIMO and nonlinear model, many industrial controllers for such robots consist of a number of linear controllers designed to **control individual joint motions**.
+One of the reasons why such decentralization can perform well in practice is the use of large gear reductions in robot actuators, which significantly reduces the coupling and nonlinear behavior of robot dynamics.
+ +However, using advanced techniques in nonlinear and MIMO control permits to overcome limitations of the SISO approach. + + +### Controller Topology {#controller-topology} + + + +
+
+ +In motion control of parallel manipulator, it is assumed that the controller computes the **required actuator forces** or torques to cause the robot motion to follow a desired position and orientation trajectory. + +
+ +Let us use the motion variables as the generalized coordinate of the moving platform defined by \\(\bm{\mathcal{X}} = [\bm{x}\_P, \bm{\theta}]^T\\), in which the linear motion is represented by \\(\bm{x}\_p = [x\_p, y\_p, z\_p]^T\\), while the moving platform orientation is represented by **screw coordinates** \\(\bm{\theta} = \theta[s\_x, s\_y, s\_z]^T = [\theta\_x, \theta\_y, \theta\_z]^T\\).
+ +Consider the general closed-form dynamics formulation of a parallel robot + +\begin{equation} + \tcmbox{\bm{M}(\bm{\mathcal{X}})\ddot{\bm{\mathcal{X}}} + \bm{C}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}})\dot{\bm{\mathcal{X}}} + \bm{G}(\bm{\mathcal{X}}) = \bm{\mathcal{F}}} +\end{equation} + +where + +- \\(\bm{M}(\bm{\mathcal{X}})\\) denotes the mass matrix +- \\(\bm{C}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}})\\) denotes the Coriolis and centrifugal matrix +- \\(\bm{G}(\bm{\mathcal{X}})\\) denotes the gravity vector +- \\(\bm{\mathcal{F}}\\) denotes the generalized forces applied to the moving platform center of mass + +The generalized forces can be decomposed as follow +\\[ \bm{\mathcal{F}} = \bm{J}^T \bm{\tau} + \bm{\mathcal{F}}\_d \\] +with + +- \\(\bm{J}\\) is the Jacobian +- \\(\bm{\tau}\\) are the actuator forces +- \\(\bm{\mathcal{F}}\_d\\) are any external wrenches + +
+
+ +**Control topology** is referred to the **structure of the control system** used to compute the actuator forces/torques from the measurements, and the required pre and post processing. + +
+ +For motion control of a manipulator, the controller has to compute the actuator force/torques required to cause the motion of the moving platform according to the desired trajectory. +In general, the desired motion of the moving platform may be represented by the desired generalized coordinate of the manipulator, denoted by \\(\bm{\mathcal{X}}\_d\\).
+ +To perform such motion in closed loop, it is necessary to **measure the output motion** \\(\bm{\mathcal{X}}\\) of the manipulator by an instrumentation system. +Such instrumentation usually consists of two subsystems: the first subsystem may use accurate accelerometers, or global positioning systems to calculate the position of a point on the moving platform; and a second subsystem may use inertial or laser gyros to determine orientation of the moving platform.
+ +Figure [fig:general_topology_motion_feedback](#fig:general_topology_motion_feedback) shows the general topology of a motion controller using direct measurement of the motion variable \\(\bm{\mathcal{X}}\\), as feedback in the closed-loop system. +In such a structure, the measured position and orientation of the manipulator is compared to its desired value to generate the **motion error vector** \\(\bm{e}\_\mathcal{X}\\). +The controller uses this error information to generate suitable commands for the actuators to minimize the tracking error.
+ + + +{{< figure src="/ox-hugo/taghirad13_general_topology_motion_feedback.png" caption="Figure 15: The general topology of motion feedback control: motion variable \\(\bm{\mathcal{X}}\\) is measured" >}} + +However, it is usually much **easier to measure the active joint variable** rather than measuring the final position and orientation of the moving platform. +The relation between the **joint variable** \\(\bm{q}\\) and **motion variable** of the moving platform \\(\bm{\mathcal{X}}\\) is dealt with the **forward and inverse kinematics**. +The relation between the **differential motion variables** \\(\dot{\bm{q}}\\) and \\(\dot{\bm{\mathcal{X}}}\\) is studied through the **Jacobian analysis**.
+ +It is then possible to use the forward kinematic analysis to calculate \\(\bm{\mathcal{X}}\\) from the measured joint variables \\(\bm{q}\\), and one may use the control topology depicted in Figure [fig:general_topology_motion_feedback_bis](#fig:general_topology_motion_feedback_bis) to implement such a controller. + + + +{{< figure src="/ox-hugo/taghirad13_general_topology_motion_feedback_bis.png" caption="Figure 16: The general topology of motion feedback control: the active joint variable \\(\bm{q}\\) is measured" >}} + +In this topology, the forward kinematic analysis of the manipulator has to be performed to implement the feedback loop. +As described earlier, this is a **complex task** for parallel manipulators. +It is even more complex when a solution has to be found in real time.
+ +However, as shown herein before, the inverse kinematic analysis of parallel manipulators is much easier to carry out. +To overcome the implementation problem of the control topology in Figure [fig:general_topology_motion_feedback_bis](#fig:general_topology_motion_feedback_bis), another control topology is usually implemented for parallel manipulators. + +In this topology, depicted in Figure [fig:general_topology_motion_feedback_ter](#fig:general_topology_motion_feedback_ter), the desired motion trajectory of the robot \\(\bm{\mathcal{X}}\_d\\) is used in an **inverse kinematic analysis** to find the corresponding desired values for joint variable \\(\bm{q}\_d\\). +Hence, the controller is designed based on the **joint space error** \\(\bm{e}\_q\\). + + + +{{< figure src="/ox-hugo/taghirad13_general_topology_motion_feedback_ter.png" caption="Figure 17: The general topology of motion feedback control: the active joint variable \\(\bm{q}\\) is measured, and the inverse kinematic analysis is used" >}} + +Therefore, the **structure and characteristics** of the controller in this topology is totally **different** from that given in the first two topologies. + +The **input and output** of the controller depicted in Figure [fig:general_topology_motion_feedback_ter](#fig:general_topology_motion_feedback_ter) are **both in the joint space**. +However, this is not the case in the previous topologies where the input to the controller is the motion error in task space, while its output is in the joint space. + +For the topology in Figure [fig:general_topology_motion_feedback_ter](#fig:general_topology_motion_feedback_ter), **independent controllers** for each joint may be suitable.
+ +To generate a **direct input to output relation in the task space**, consider the topology depicted in Figure [fig:general_topology_motion_feedback_quater](#fig:general_topology_motion_feedback_quater). +A force distribution block is added which maps the generated wrench in the task space \\(\bm{\mathcal{F}}\\), to its corresponding actuator forces/torque \\(\bm{\tau}\\). + + + +{{< figure src="/ox-hugo/taghirad13_general_topology_motion_feedback_quater.png" caption="Figure 18: The general topology of motion feedback control in task space: the motion variable \\(\bm{\mathcal{X}}\\) is measured, and the controller output generates wrench in task space" >}} + +For a fully parallel manipulator such as the Stewart-Gough platform, this mapping can be constructed from the **Jacobian** transpose of the manipulator: +\\[ \bm{\mathcal{F}} = \bm{J}^T \bm{\tau}; \quad \bm{\tau} = \bm{J}^{-T} \bm{\mathcal{F}} \\] + + +### Motion Control in Task Space {#motion-control-in-task-space} + + + + +#### Decentralized PD Control {#decentralized-pd-control} + +In the control structure in Figure [fig:decentralized_pd_control_task_space](#fig:decentralized_pd_control_task_space), a number of linear PD controllers are used in a feedback structure on each error component. +The decentralized controller consists of **six disjoint linear controllers** acting on each error component \\(\bm{e}\_x = [e\_x,\ e\_y,\ e\_z,\ e\_{\theta\_x},\ e\_{\theta\_y},\ e\_{\theta\_z}]\\). +The PD controller is denoted by \\(\bm{K}\_d s + \bm{K}\_p\\), in which \\(\bm{K}\_d\\) and \\(\bm{K}\_p\\) are \\(6 \times 6\\) **diagonal matrices** denoting the derivative and proportional controller gains for each error term. + + + +{{< figure src="/ox-hugo/taghirad13_decentralized_pd_control_task_space.png" caption="Figure 19: Decentralized PD controller implemented in task space" >}} + +Hence, by this structure, each tracking error component is **treated separately**. 
+The output of the controller is denoted by \\(\bm{\mathcal{F}} = [F\_x\ F\_y\ F\_z\ \tau\_x\ \tau\_y\ \tau\_z]\\).
+ +In practice, the calculated output wrench is transformed into actuator forces through the force distribution block. +This mapping is implemented through inverse of the manipulator **Jacobian** transpose by \\(\bm{\tau} = \bm{J}^{-T} \bm{\mathcal{F}}\\).
+ +Different alternatives of linear controllers can be used instead of the PD controller used in this structure, however PD controller is the simplest form which can preserve the manipulator stability while providing suitable tracking performance.
+ +The proposed decentralized PD controller is very simple in structure and therefore easily implementable. +The design of such a controller needs no detailed information on the manipulator dynamics. +The controller gains are generally tuned experimentally based on physical realization of the controller by trial and error. + + +#### Feed Forward Control {#feed-forward-control} + +A feedforward wrench denoted by \\(\bm{\mathcal{F}}\_{ff}\\) may be added to the decentralized PD controller structure as depicted in Figure [fig:feedforward_control_task_space](#fig:feedforward_control_task_space). +This term is generated from the dynamic model of the manipulator in the task space, represented in a closed form by the following equation: +\\[ \bm{\mathcal{F}}\_{ff} = \bm{\hat{M}}(\bm{\mathcal{X}}\_d)\ddot{\bm{\mathcal{X}}}\_d + \bm{\hat{C}}(\bm{\mathcal{X}}\_d, \dot{\bm{\mathcal{X}}}\_d)\dot{\bm{\mathcal{X}}}\_d + \bm{\hat{G}}(\bm{\mathcal{X}}\_d) \\] + + + +{{< figure src="/ox-hugo/taghirad13_feedforward_control_task_space.png" caption="Figure 20: Feed forward wrench added to the decentralized PD controller in task space" >}} + +The desired trajectory in task space \\(\bm{\mathcal{X}}\_d\\), and its derivatives \\(\dot{\bm{\mathcal{X}}}\_d\\), \\(\ddot{\bm{\mathcal{X}}}\_d\\) are the required inputs for the feedforward block. +This term is called feedforward since no online information of the output motion trajectory \\(\bm{\mathcal{X}}\\) is needed for its computation.
+ +In order to generate this term, the dynamic formulation of the robot and its kinematic and dynamic parameters are needed. +In practice, exact knowledge of the dynamic matrices is not available, and therefore, **estimates** of these matrices are used, denoted by \\(\hat{\bm{M}}\\), \\(\hat{\bm{C}}\\) and \\(\hat{\bm{G}}\\).
+ +The information required to generate the feedforward wrench \\(\bm{\mathcal{F}}\_{ff}\\) is usually available beforehand and can be derived offline. +The closed-loop dynamic formulation for the manipulator becomes: + +\begin{equation} + \begin{aligned} + \bm{M}(\bm{\mathcal{X}})\ddot{\bm{\mathcal{X}}} &+ \bm{C}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}})\dot{\bm{\mathcal{X}}} + \bm{G}(\bm{\mathcal{X}}) \\\\\\ + &= \bm{\mathcal{F}} + \bm{\mathcal{F}}\_d \\\\\\ + &= \bm{\mathcal{F}}\_{pd} + \bm{\mathcal{F}}\_{ff} + \bm{\mathcal{F}}\_d \\\\\\ + &= \bm{K}\_d \dot{\bm{e}}\_x + \bm{K}\_p \bm{e}\_x + \bm{\mathcal{F}}\_{ff} + \bm{\hat{M}}\ddot{\bm{\mathcal{X}}}\_d + \bm{\hat{C}}\dot{\bm{\mathcal{X}}}\_d + \bm{\hat{G}} + \end{aligned} +\end{equation} + +If the knowledge of the dynamic matrices is complete, we may assume that \\(\hat{\bm{M}} = \bm{M}\\), \\(\hat{\bm{C}} = \bm{C}\\) and \\(\hat{\bm{G}} = \bm{G}\\). +Furthermore, if we consider that the controller performs well such that \\(\bm{\mathcal{X}}(t) \simeq \bm{\mathcal{X}}\_d(t)\\) and \\(\dot{\bm{\mathcal{X}}}(t) \simeq \dot{\bm{\mathcal{X}}}\_d(t)\\), the simplified closed-loop dynamics become: + +\begin{equation} + \begin{aligned} + \bm{M} (\ddot{\bm{\mathcal{X}}}\_d - \ddot{\bm{\mathcal{X}}}) + \bm{K}\_d \dot{\bm{e}}\_x + \bm{K}\_p \bm{e}\_x + \bm{\mathcal{F}}\_d &= 0 \\\\\\ + \bm{M} \ddot{\bm{e}}\_x + \bm{K}\_d \dot{\bm{e}}\_x + \bm{K}\_p \bm{e}\_x + \bm{\mathcal{F}}\_d &= 0 + \end{aligned} +\end{equation} + +This equation implies that, if the mentioned assumptions hold, the **error dynamics** satisfies a set of **second-order system** in the presence of disturbance. +By choosing appropriate gains for PD controller, the transient and steady-state performance of tracking error can be designed so as to satisfy the application requirements.
+ +Note that except the mass matrix, the error dynamic terms are all **configuration independent**, and therefore, it is much **easier to tune the PD controller gains** to work well within the whole workspace of the robot.
+ +However, this method faces a number of **limitations** in practice. +The most important limitation of this control technique is the **stringent assumption of a complete knowledge requirement of the dynamic matrices**. +In practice, derivation of these matrices is a prohibitive task.
+ +Finally, because of the dependency of the mass matrix to the configuration of the robot, the error dynamics are not completely decoupled. +This means that correction in one error component may be considered as a disturbance effect to the other components. +To overcome these limitations, inverse dynamic approach is given in the following section. + + +#### Inverse Dynamics Control {#inverse-dynamics-control} + +
+
+ +In inverse dynamics control (IDC), nonlinear dynamics of the model is used to add a **corrective term** to the decentralized PD controller. +By this means, **nonlinear and coupling behavior of the robotic manipulator is significantly attenuated**, and therefore, the performance of linear controller is greatly improved. + +
+ +General structure of IDC applied to a parallel manipulator is depicted in Figure [fig:inverse_dynamics_control_task_space](#fig:inverse_dynamics_control_task_space). +A corrective wrench \\(\bm{\mathcal{F}}\_{fl}\\) is added in a **feedback structure** to the closed-loop system, which is calculated from the Coriolis and centrifugal matrix and gravity vector of the manipulator dynamic formulation. + +Furthermore, mass matrix is added in the forward path in addition to the desired trajectory acceleration \\(\ddot{\bm{\mathcal{X}}}\_d\\). + +As for the feedforward control, the **dynamics and kinematic parameters of the robot are needed**, and in practice estimates of these matrices are used.
+ + + +{{< figure src="/ox-hugo/taghirad13_inverse_dynamics_control_task_space.png" caption="Figure 21: General configuration of inverse dynamics control implemented in task space" >}} + +The controller output wrench applied to the manipulator may be derived as follows: + +\begin{subequations} +\begin{align} + \bm{\mathcal{F}} &= \hat{\bm{M}}(\bm{\mathcal{X}}) \bm{a} + \bm{\mathcal{F}}\_{fl} \\\\\\ + &= \hat{\bm{M}}(\bm{\mathcal{X}}) \bm{a} + \hat{\bm{C}}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}}) \dot{\bm{\mathcal{X}}} + \hat{\bm{G}}(\bm{\mathcal{X}}) \\\\\\ + \bm{a} &= \ddot{\bm{\mathcal{X}}}\_d + \bm{K}\_d \dot{\bm{e}}\_x + \bm{K}\_p \bm{e}\_x +\end{align} +\end{subequations} + +The closed-loop dynamic formulation for the manipulator becomes: + +\begin{equation} + \begin{aligned} + \bm{M}(\bm{\mathcal{X}})\ddot{\bm{\mathcal{X}}} &+ \bm{C}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}})\dot{\bm{\mathcal{X}}} + \bm{G}(\bm{\mathcal{X}}) \\\\\\ + &= \bm{\mathcal{F}} + \bm{\mathcal{F}}\_d \\\\\\ + &= \hat{\bm{M}}(\bm{\mathcal{X}}) \left(\ddot{\bm{\mathcal{X}}}\_d + \bm{K}\_d \dot{\bm{e}}\_x + \bm{K}\_p \bm{e}\_x \right) \\\\\\ + &\quad + \hat{\bm{C}}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}})\dot{\bm{\mathcal{X}}} + \hat{\bm{G}}(\bm{\mathcal{X}}) + \bm{\mathcal{F}}\_d \\\\\\ + \end{aligned} +\end{equation} + +If the knowledge of the dynamic matrices is complete, the closed-loop dynamic formulation simplifies to: + +\begin{equation} + \hat{\bm{M}}(\bm{\mathcal{X}}) \left(\ddot{\bm{e}}\_d + \bm{K}\_d \dot{\bm{e}}\_x + \bm{K}\_p \bm{e}\_x \right) + \bm{\mathcal{F}}\_d = 0 +\end{equation} + +This control technique is very popular in practice because of the fact that this technique can significantly **linearize and decouple dynamic formulation of the closed-loop error dynamics**. +Furthermore, the error dynamic terms are all **configuration independent**, and therefore, it is much easier to tune the PD controller gains for suitable performance in the whole workspace of the robot.
+ +However, note that for a good performance, an **accurate model of the system is required**, and the overall procedure is **not robust to modeling uncertainty**. +Furthermore, this technique is computationally intensive in terms of the online computations needed to carry out the closed-loop control structure. + + +#### Partial Linearization IDC {#partial-linearization-idc} + +Inverse dynamics control has several features making it very attractive in practice. +However, to apply this method, complete knowledge of the dynamic formulation matrices is required. +This requirement has the main drawback that the dynamic formulation of the parallel manipulator is a complicated step to be carried out.
+ +To implement all the terms in IDC structure, not only the structure and components of such matrices must be carefully determined, but also the kinematics and inertial parameters of the robot are needed to be identified and calibrated. +This step requires the use of high-precision calibration equipment which are not usually accessible. +Finally, if all the terms and parameters are well known, implementation of full inverse dynamic linearization is computationally intensive. + +These are the reasons why, in practice, IDC control is extended to different forms where the above-mentioned stringent requirements are reduced.
+ +To develop the simplest possible implementable IDC, let us recall dynamic formulation complexities: + +- the manipulator mass matrix \\(\bm{M}(\bm{\mathcal{X}})\\) is derived from kinetic energy of the manipulator (Eq. [eq:kinetic_energy](#eq:kinetic_energy)) +- the gravity vector \\(\bm{G}(\bm{\mathcal{X}})\\) is derived from potential energy (Eq. [eq:gravity_vectory](#eq:gravity_vectory)) +- the Coriolis and centrifugal matrix \\(\bm{C}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}})\\) is derived from Eq. [eq:gravity_vectory](#eq:gravity_vectory) + +The computation of the Coriolis and centrifugal matrix is more intensive than that of the mass matrix. +Gravity vector is more easily computable. + +However, it is shown that certain properties hold for mass matrix, gravity vector and Coriolis and centrifugal matrix, which might be directly used in the control techniques developed for parallel manipulators. +One of the most important properties of dynamic matrices is the skew-symmetric property of the matrix \\(\dot{\bm{M}} - 2 \bm{C}\\) .
+ +Consider dynamic formulation of parallel robot given in Eq. [eq:closed_form_dynamic_formulation](#eq:closed_form_dynamic_formulation), in which the skew-symmetric property of dynamic matrices is satisfied. +The simplest form of IDC control effort \\(\bm{\mathcal{F}}\\) consists of: +\\[ \bm{\mathcal{F}} = \bm{\mathcal{F}}\_{pd} + \bm{\mathcal{F}}\_{fl} \\] +in which the first term \\(\bm{\mathcal{F}}\_{pd}\\) is generated by the simplified PD form on the motion error: +\\[ \bm{\mathcal{F}}\_{pd} = \bm{K}\_d \dot{\bm{e}}\_x + \bm{K}\_p \bm{e}\_x \\] + +The second term \\(\bm{\mathcal{F}}\_{fl}\\) is considered to be only the gravity vector of the manipulator \\(\bm{G}(\bm{\mathcal{X}})\\), at any configuration, and the computationally intensive Coriolis and centrifugal term is not used: +\\[ \bm{\mathcal{F}}\_{fl} = \bm{G}(\bm{\mathcal{X}}) \\] + +Note that for an appreciable tracking performance with no static error at steady state, it is required to have complete knowledge of **only** the gravity term. +By this means, computations required in this control technique are significantly less than that of the general IDC.
+ +Despite the simple structure of such a controller, the resulting control technique performs very well, especially at steady state. +We can show that this control topology achieves asymptotic tracking for a constant desired trajectory motion, that is, \\(\dot{\bm{\mathcal{X}}}\_d = 0\\).
+ +This reveals the fact that even if the mass matrix and Coriolis and centrifugal matrix are not used in the feedback, and the closed-loop dynamics is not completely linearized, the PD control structure with gravity compensation can still lead to asymptotic tracking. +However, to obtain suitable transient performance, more information of the system dynamics must be used in the linearization technique given in IDC. + + +### Robust and Adaptive Control {#robust-and-adaptative-control} + +Inverse dynamics control faces the stringent requirement that for a good performance, an **accurate model** of the system is required, and the overall procedure is **not robust** to modeling uncertainty. +Furthermore, this technique is computationally intensive in terms of online computation needed to carry out the closed-loop control structure. +The proposed modified inverse dynamics control, while being beneficial in terms of computational cost, is not suitable in terms of a closed-loop transient performance.
+ +Another approach to modify IDC is to consider a complete linearization, but **assume that complete knowledge of dynamic formulation matrices is not available**. +To **compensate for the lack of knowledge**, two advanced control methods, namely **robust** and **adaptive control** are proposed: + +- In the **robust approach**, a **fixed controller** is designed to satisfy the control objectives for the **worst possible case of modeling uncertainty** and disturbance wrenches. +- In the **adaptive approach**, the estimates of dynamic formulation matrices are **updated** such that the difference between the true values of these matrices to their estimates converges to zero. + +A global understanding of the trade-offs involved in each method is needed to employ either of them in practice. + + +#### Robust Inverse Dynamics Control {#robust-inverse-dynamics-control} + +Various sources of uncertainties such as unmodelled dynamics, unknown parameters, calibration error, unknown disturbance wrenches, and varying payloads may exist, and are not seen in dynamic model of the manipulator. + +To consider these modeling uncertainty in the closed-loop performance of the manipulator, recall the general closed-form dynamic formulation of the manipulator given in Eq. [eq:closed_form_dynamic_formulation](#eq:closed_form_dynamic_formulation), and modify the inverse dynamics control input \\(\bm{\mathcal{F}}\\) as + +\begin{align\*} + \bm{\mathcal{F}} &= \hat{\bm{M}}(\bm{\mathcal{X}}) \bm{a}\_r + \hat{\bm{C}}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}}) \dot{\bm{\mathcal{X}}} + \hat{\bm{G}}(\bm{\mathcal{X}})\\\\\\ + \bm{a}\_r &= \ddot{\bm{\mathcal{X}}}\_d + \bm{K}\_d \dot{\bm{e}}\_x + \bm{K}\_p \bm{e}\_x + \bm{\delta}\_a +\end{align\*} + +in which \\(\bm{a}\_r\\) is the robustified control input. + +Comparing this equation to the usual IDC, a robustifying term \\(\bm{\delta}\_a\\) is added to compensate for modeling uncertainties. 
+ +Note that, as defined earlier, the notation \\(\hat{(.)}\\) represents the estimated value of \\((.)\\) and \\(\tilde{(.)}\\) is defined as the error mismatch between the estimated value and the true value as \\(\tilde{(.)} = \hat{(.)}- (.)\\). + +In a similar manner \\(\tilde{(.)}\\) notation may be applied to the motion variables as +\\[ \tilde{\bm{\mathcal{X}}} = \bm{\mathcal{X}} - \bm{\mathcal{X}}\_d = - \bm{e}\_x \\] + +The closed-loop dynamic formulation of the manipulator can be written as: +\\[ \ddot{\bm{\mathcal{X}}} = \bm{a}\_r + \bm{\eta}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}}, \bm{a}\_r) \\] +in which +\\[ \bm{\eta} = \bm{M}^{-1} \left( \tilde{\bm{M}} \bm{a}\_r + \tilde{\bm{C}} \dot{\bm{\mathcal{X}}} + \tilde{\bm{G}} \right) \\] +is a measure of modeling uncertainty. + + + +{{< figure src="/ox-hugo/taghirad13_robust_inverse_dynamics_task_space.png" caption="Figure 22: General configuration of robust inverse dynamics control implemented in the task space" >}} + + +#### Adaptive Inverse Dynamics Control {#adaptive-inverse-dynamics-control} + + + +{{< figure src="/ox-hugo/taghirad13_adaptative_inverse_control_task_space.png" caption="Figure 23: General configuration of adaptative inverse dynamics control implemented in task space" >}} + + +### Motion Control in Joint Space {#motion-control-in-joint-space} + +Although the motion control schemes developed in section [sec:control_task_space](#sec:control_task_space) are very effective for tracking performance, they suffer from an implementation constraint that the motion variable \\(\bm{\mathcal{X}}\\) must be measured in practice.
+ +If this measurement is available without any doubt, such topologies are among the best routines to be implemented in practice. +However, as explained in Section [sec:control_topology](#sec:control_topology), in many practical situations measurement of the motion variable \\(\bm{\mathcal{X}}\\) is difficult or expensive, and usually just the active joint variables \\(\bm{q}\\) are measured. +In such cases, the controllers developed in the joint space may be recommended for practical implementation.
+ +To generate a direct input to output relation in the joint space, consider the topology depicted in Figure [fig:general_topology_motion_feedback_bis](#fig:general_topology_motion_feedback_bis). +In this topology, the controller input is the joint variable error vector \\(\bm{e}\_q = \bm{q}\_d - \bm{q}\\), and the controller output is directly the actuator force vector \\(\bm{\tau}\\), and hence there exists a **one-to-one correspondence between the controller input to its output**.
+ +The general form of dynamic formulation of parallel robot is usually given in the task space. +For motion control in joint space, we need to transform the dynamic formulation in the joint space, by which the actuator forces \\(\bm{\tau}\\) are directly related to the active joint variables \\(\bm{q}\\). + + +#### Dynamic Formation in the Joint Space {#dynamic-formation-in-the-joint-space} + +The relation between the task space variables to their counterparts in the joint space can be derived by forward and inverse kinematics relations. +Although both analyses involve solution to a set of non-linear equations, for parallel manipulators, inverse kinematic solution proves to be much easier to obtain than that of forward kinematic solution.
+ +This relation in **differential kinematics** is much simpler and can be completely determined by the Jacobian matrix: +\\[ \tcmbox{\dot{\bm{q}} = \bm{J} \dot{\bm{\mathcal{X}}} \Longrightarrow \dot{\bm{\mathcal{X}}} = \bm{J}^{-1} \dot{\bm{q}}} \\] + +The acceleration variables are then: +\\[ \ddot{\bm{q}} = \dot{\bm{J}} \dot{\bm{\mathcal{X}}} + \bm{J} \ddot{\bm{\mathcal{X}}} \Longrightarrow \ddot{\bm{\mathcal{X}}} = \bm{J}^{-1} \ddot{\bm{q}} - \bm{J}^{-1} \dot{\bm{J}} \dot{\bm{\mathcal{X}}} \\] + +Furthermore, the relation between the actuator force vector \\(\bm{\tau}\\) to the corresponding task space wrench is given by: +\\[ \tcmbox{\bm{\mathcal{F}} = \bm{J}^T \bm{\tau} \Longrightarrow \bm{\tau} = \bm{J}^{-T} \bm{\mathcal{F}}} \\] + +Substituting \\(\dot{\bm{\mathcal{X}}}\\) and \\(\ddot{\bm{\mathcal{X}}}\\) from the above equations into the dynamic formulation of the parallel robot gives: + +\begin{equation\*} + \begin{aligned} + & \left( \bm{J}^{-T} \bm{M} \bm{J}^{-1} \right) \ddot{\bm{q}} \\\\\\ + & \quad + \bm{J}^{-T} \left( \bm{C} - \bm{M} \bm{J}^{-1} \dot{\bm{J}} \right) \bm{J}^{-1} \dot{\bm{q}} \\\\\\ + & \quad + \bm{J}^{-T} \bm{G} + \bm{J}^{-T} \bm{\mathcal{F}}\_d = \bm{\tau} + \end{aligned} +\end{equation\*} + +
+
+ +\begin{equation} + \bm{M}\_q \ddot{\bm{q}} + \bm{C}\_q \dot{\bm{q}} + \bm{G}\_q + \bm{\tau}\_d = \bm{\tau} \label{eq:dynamics\_joint\_space} +\end{equation} + +with: + +\begin{subequations} +\begin{align} + \bm{M}\_q =& \bm{J}^{-T} \bm{M} \bm{J}^{-1} \\\\\\ + \bm{D}\_q =& \bm{J}^{-T} \left( \bm{C} - \bm{M} \bm{J}^{-1} \dot{\bm{J}} \right) \bm{J}^{-1} \\\\\\ + \bm{G}\_q =& \bm{J}^{-T} \bm{G} \\\\\\ + \bm{\tau}\_q =& \bm{J}^{-T} \bm{\mathcal{F}}\_d +\end{align} +\end{subequations} + +
+ +Equation [eq:dynamics_joint_space](#eq:dynamics_joint_space) represents the closed form dynamic formulation of a general parallel robot in the joint space.
+ +Note that the dynamic matrices are **not** explicitly represented in terms of the joint variable vector \\(\bm{q}\\). +In fact, to fully derive these matrices, the Jacobian matrices must be computed and are generally derived as a function of the motion variables \\(\bm{\mathcal{X}}\\). +Furthermore, the main dynamic matrices are all functions of the motion variable \\(\bm{\mathcal{X}}\\). +Hence, in practice, to find the dynamic matrices represented in the joint space, **forward kinematics** should be solved to find the motion variable \\(\bm{\mathcal{X}}\\) for any given joint motion vector \\(\bm{q}\\).
+ +Since in parallel robots the forward kinematic analysis is computationally intensive, there exist inherent difficulties in finding the dynamic matrices in the joint space as an explicit function of \\(\bm{q}\\). +In case it is possible to solve forward kinematics in an online manner, it is recommended to use the control topology depicted in [fig:general_topology_motion_feedback_bis](#fig:general_topology_motion_feedback_bis), and implement control law design in the task space.
+ +However, one implementable alternative to calculate the dynamic matrices represented in the joint space is to use the **desired motion trajectory** \\(\bm{\mathcal{X}}\_d\\) instead of the true value of motion vector \\(\bm{\mathcal{X}}\\) in the calculations. +This approximation significantly reduces the computational cost, with the penalty of having mismatch between the estimated values of these matrices to their true values. + + +#### Decentralized PD Control {#decentralized-pd-control} + +The first control strategy introduced in the joint space consists of the simplest form of feedback control in such manipulators. +In this control structure, depicted in Figure [fig:decentralized_pd_control_joint_space](#fig:decentralized_pd_control_joint_space), a number of PD controllers are used in a feedback structure on each error component. + +The PD controller is denoted by \\(\bm{K}\_d s + \bm{K}\_p\\), where \\(\bm{K}\_d\\) and \\(\bm{K}\_p\\) are \\(n \times n\\) **diagonal** matrices denoting the derivative and proportional controller gains, respectively.
+ + + +{{< figure src="/ox-hugo/taghirad13_decentralized_pd_control_joint_space.png" caption="Figure 24: Decentralized PD controller implemented in joint space" >}} + +By this structure, each tracking error component is **treated separately** by its disjoint PD controller. +The proposed decentralized PD controller is very simple in structure, and therefore very easy to be implemented on the manipulator. +The design of such a controller **needs no detailed information on the manipulator dynamic formulation and parameters**. +However, the tracking performance of such a controller is relatively poor, and **static tracking errors** might be unavoidable. +Also, the performance of the closed-loop system is configuration dependent.
+ +In practice, the gains are tuned experimentally and obtained as a trade-off between transient behavior and steady-state errors at different configurations. +As the dynamics of the system in the joint space is configuration dependent, finding suitable controller gains to result in required performance in all configurations is a difficult task.
+ +The performance of the controller to attenuate measurement noise and external disturbance wrenches are also poor in practice. +To remedy these shortcomings, some modifications have been proposed to this structure and further described. + + +#### Feedforward Control {#feedforward-control} + +The tracking performance of the simple PD controller implemented in the joint space is usually not sufficient at different configurations. +To improve the tracking performance, a feedforward actuator force denoted by \\(\bm{\tau}\_{ff}\\) may be added to the structure of the controller as depicted in Figure [fig:feedforward_pd_control_joint_space](#fig:feedforward_pd_control_joint_space). + + + +{{< figure src="/ox-hugo/taghirad13_feedforward_pd_control_joint_space.png" caption="Figure 25: Feed forward actuator force added to the decentralized PD controller in joint space" >}} + +The feedforward term is generated from the dynamic formulation of the manipulator. +The desired trajectory in the task space \\(\bm{\mathcal{X}}\_d\\) and its derivatives \\(\dot{\bm{\mathcal{X}}}\_d\\), \\(\ddot{\bm{\mathcal{X}}}\_d\\) are thus required.
+ +In practice, exact knowledge of dynamic matrices are not available, and therefore, estimates of these matrices are used in this derivation denoted by \\(\hat{\bm{M}}\\), \\(\hat{\bm{C}}\\) and \\(\hat{\bm{G}}\\).
+ +The information required to generate the feedforward actuator force \\(\bm{\tau}\_{ff}\\) is usually available beforehand, and in such a case, the feedforward term corresponding to a given trajectory can be **determined off-line**, while the computation of the decentralized feedback term would be executed online.
+ +If complete information of the dynamic matrices is available, and if we assume that the system is performing well, meaning that \\(\bm{\mathcal{X}}(t) \simeq \bm{\mathcal{X}}\_d(t)\\) and \\(\dot{\bm{\mathcal{X}}}(t) \simeq \dot{\bm{\mathcal{X}}}\_d(t)\\), we can write the closed loop dynamics as follows: +\\[ \bm{M}\_q \ddot{\bm{e}}\_q + \bm{K}\_d \dot{\bm{e}}\_q + \bm{K}\_p \bm{e}\_q = \bm{\tau}\_d \\] + +The error dynamics satisfy a set of second-order differential equations in the presence of disturbance. +Therefore, by choosing appropriate gains of the PD controller, the transient and steady-state performance of the tracking error can be suitably designed. + +Note that except for the mass matrix, the error dynamics terms are all **configuration independent**, and therefore, it is much easier to tune the PD controller gains to work well in the whole workspace of the robot in such a structure.
+ +However, this method suffers from a number of **limitations** in practice. +The most important limitation is the **stringent assumption of the complete information requirement of dynamics matrices**. +Furthermore, even if all the assumptions hold, because of the configuration dependence of the mass matrix, the error dynamics is still not completely decoupled. +This means that correction in one component may be considered as a disturbance effect to the other components. +To overcome these limitations, the inverse dynamic approach has been developed and is given in the following section. + + +#### Inverse Dynamics Control {#inverse-dynamics-control} + +As seen in the previous section, the tracking performance of a decentralized PD controller implemented in the joint space is not uniform at different configurations. +To compensate for such effects, a feedforward torque is added to the structure of the controller, by which the shortcomings of the decentralized controller are partially remedied. +However, the closed-loop performance still faces a number of limitations, which cannot be completely remedied because of the inherent conditions on feedforward structure of that proposed controller. +To overcome these limitations, in this section, a control technique based on **inverse dynamic feedback** of the manipulator in the joint space is presented.
+ +
+
+ +In the inverse dynamics control (IDC) strategy, the **nonlinear dynamics of the model is used to add a corrective term to the decentralized PD controller**. +By this means, the **nonlinear and coupling characteristics** of robotic manipulator is significantly **attenuated**, and therefore, the performance of linear controller is significantly improved. + +
+ +The general structure of inverse dynamics control applied to a parallel manipulator in the joint space is depicted in Figure [fig:inverse_dynamics_control_joint_space](#fig:inverse_dynamics_control_joint_space). + +A corrective torque \\(\bm{\tau}\_{fl}\\) is added in a **feedback** structure to the closed-loop system, which is calculated from the Coriolis and Centrifugal matrix, and the gravity vector of the manipulator dynamic formulation in the joint space. +Furthermore, the mass matrix is acting in the **forward path**, in addition to the desired trajectory acceleration \\(\ddot{\bm{q}}\_d\\). +Note that to generate this term, the **dynamic formulation** of the robot, and its **kinematic and dynamic parameters are needed**. +In practice, exact knowledge of the dynamic matrices is not available, and their estimates are used.
+ + + +{{< figure src="/ox-hugo/taghirad13_inverse_dynamics_control_joint_space.png" caption="Figure 26: General configuration of inverse dynamics control implemented in joint space" >}} + +The controller output torque applied to the manipulator may be calculated by: + +\begin{subequations} +\begin{align} + \bm{\tau} &= \hat{\bm{M}}\_q \bm{a}\_q + \bm{\tau}\_{fl} \\\\\\ + \bm{\tau}\_{fl} &= \hat{\bm{C}}\_q \dot{\bm{q}} + \hat{\bm{G}}\_q \\\\\\ + \bm{a}\_q &= \ddot{\bm{q}}\_d + \bm{K}\_d \dot{\bm{e}}\_q + \bm{K}\_p \bm{e}\_q +\end{align} +\end{subequations} + +If the knowledge of dynamic matrices is complete, the closed-loop dynamic formulation is simplified to: +\\[ \hat{\bm{M}}\_q \left( \ddot{\bm{e}}\_q + \bm{K}\_d \dot{\bm{e}}\_q + \bm{K}\_p \bm{e}\_q \right) + \bm{\tau}\_d = 0 \\] + +This equation implies that if there exist complete knowledge of the dynamic matrices, the tracking error dynamic equation satisfies a set of second-order systems in the presence of disturbance. +Consider the case where no disturbance wrench is applied to the manipulator, as the mass matrix \\(\bm{M}\_q\\) is positive definite at all non-singular configurations, it can be inverted, and the error dynamics simplifies to: +\\[ \ddot{\bm{e}}\_q + \bm{K}\_d \dot{\bm{e}}\_q + \bm{K}\_p \bm{e}\_q = 0 \\] + +This control technique is very popular in practice because of the fact that it can significantly **linearize** and **decouple** the dynamic formulation of the closed-loop system for error dynamics components. +Furthermore, the error dynamic terms are all **configuration independent**, and therefore, it is **much easier to tune the PD controller gains** to perform well in the whole workspace of the robot.
+ +However, note that for a good performance, an **accurate model of the system is required**, and the overall procedure is **not robust** to model uncertainties. + + +### Summary of Motion Control Techniques {#summary-of-motion-control-techniques} + +In this section, a number of control techniques have been developed for parallel robots. +Based on the dynamic formulation given in Section [sec:dynamics](#sec:dynamics), many **model-based** control techniques have been developed for implementation in the task space as well as in the joint space. +These control techniques are presented from the simplest form of decentralized PD control to more advanced robust and adaptive inverse dynamics control. + +A summary of these techniques is given below. + + +##### Dynamic Formulations {#dynamic-formulations} + +The dynamic formulation of a parallel robot may be directly represented as a function of motion variable \\(\bm{\mathcal{X}}\\) in the task space as follows: + +\begin{equation\*} + \bm{M}(\bm{\mathcal{X}})\ddot{\bm{\mathcal{X}}} + \bm{C}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}})\dot{\bm{\mathcal{X}}} + \bm{G}(\bm{\mathcal{X}}) = \bm{\mathcal{F}} + \bm{\mathcal{F}}\_d +\end{equation\*} + +The dynamic formulation may be represented as a function of actuator motion variable \\(\bm{q}\\) as + +\begin{equation\*} + \bm{M}\_q \ddot{\bm{q}} + \bm{C}\_q \dot{\bm{q}} + \bm{G}\_q = \bm{\tau} + \bm{\tau}\_d +\end{equation\*} + +in which these two formulations are closely related to each other by the following relations: + +\begin{equation\*} + \begin{aligned} + \bm{M}\_q &= \bm{J}^{-T} \bm{M} \bm{J}^{-1} \\\\\\ + \bm{C}\_q &= \bm{J}^{-T} \left( \bm{C} - \bm{M}\bm{J}^{-1}\dot{\bm{J}} \right) \bm{J}^{-1} \\\\\\ + \bm{G}\_q &= \bm{J}^{-T} \bm{G} \\\\\\ + \bm{\tau}\_d &= \bm{J}^{-T} \bm{\mathcal{F}}\_d + \end{aligned} +\end{equation\*} + + +##### Decentralized PD Control {#decentralized-pd-control} + +The simplest controller for a parallel robot can be considered as a decentralized PD 
controller being implemented individually on each error component.
+If such a structure is implemented in the task space, the control effort is calculated by
+
+\begin{equation\*}
+ \bm{\mathcal{F}} = \bm{K}\_d \dot{\bm{e}}\_x + \bm{K}\_p \bm{e}\_x
+\end{equation\*}
+
+and the actuator effort can be generally determined through a force distribution scheme.
+
+For a completely parallel manipulator, the actuator forces can be generated by \\(\bm{\tau} = \bm{J}^{-T} \bm{\mathcal{F}}\\) at non-singular configurations.
+
+Decentralized PD control can be directly implemented in the joint space by the following equation:
+
+\begin{equation\*}
+ \bm{\tau} = \bm{K}\_d \dot{\bm{e}}\_q + \bm{K}\_p \bm{e}\_q
+\end{equation\*}
+
+
+##### Feed Forward Control {#feed-forward-control}
+
+To reduce the performance limitations of simple PD control, the control effort may be enforced with a feed forward wrench given by
+
+\begin{equation\*}
+ \bm{\mathcal{F}} = \bm{\mathcal{F}}\_{pd} + \bm{\mathcal{F}}\_{ff}
+\end{equation\*}
+
+in which
+
+\begin{equation\*}
+ \begin{aligned}
+ \bm{\mathcal{F}}\_{pd} &= \bm{K}\_d \dot{\bm{e}}\_x + \bm{K}\_p \bm{e}\_x \\\\\\
+ \bm{\mathcal{F}}\_{ff} &= \hat{\bm{M}}(\bm{\mathcal{X}}\_d)\ddot{\bm{\mathcal{X}}}\_d + \hat{\bm{C}}(\bm{\mathcal{X}}\_d, \dot{\bm{\mathcal{X}}}\_d)\dot{\bm{\mathcal{X}}}\_d + \hat{\bm{G}}(\bm{\mathcal{X}}\_d)
+ \end{aligned}
+\end{equation\*}
+
+where \\(\hat{\bm{M}}\\), \\(\hat{\bm{C}}\\) and \\(\hat{\bm{G}}\\) are estimates of the dynamic matrices.
+
+This controller can be implemented in joint space as follows
+
+\begin{equation\*}
+ \begin{aligned}
+ \bm{\tau} &= \bm{\tau}\_{pd} + \bm{\tau}\_{ff} \\\\\\
+ &= \bm{K}\_d \dot{\bm{e}}\_q + \bm{K}\_p \bm{e}\_q + \bm{J}^{-T} \bm{\mathcal{F}}\_{ff}
+ \end{aligned}
+\end{equation\*}
+
+
+##### Inverse Dynamics Control {#inverse-dynamics-control}
+
+In the inverse dynamics control, the nonlinear dynamics of the model is used to add a corrective term to the decentralized PD controller.
+If such a structure is implemented in the task space, the control effort is calculated by + +\begin{equation\*} + \begin{aligned} + \bm{\mathcal{F}} &= \hat{\bm{M}}(\bm{\mathcal{X}})\bm{a} + \hat{\bm{C}}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}})\dot{\bm{\mathcal{X}}} + \hat{\bm{G}}(\bm{\mathcal{X}}) \\\\\\ + \bm{a} &= \ddot{\bm{\mathcal{X}}}\_d + \bm{K}\_d \dot{\bm{e}}\_x + \bm{K}\_p \bm{e}\_x + \end{aligned} +\end{equation\*} + +In general, the tracking error dynamics can be represented by + +\begin{equation\*} + \ddot{\bm{e}}\_x + \bm{K}\_d \dot{\bm{e}}\_x + \bm{K}\_p \bm{e}\_x + \hat{\bm{M}}^{-1} \left[ \tilde{\bm{M}} \ddot{\bm{\mathcal{X}}} + \tilde{\bm{C}} \dot{\bm{\mathcal{X}}} + \tilde{\bm{G}} + \bm{\mathcal{F}}\_d \right] = 0 +\end{equation\*} + +This controller can be implemented in the joint space as follows: + +\begin{equation\*} + \begin{aligned} + \bm{\tau} &= \hat{\bm{M}}\_q \bm{a}\_q + \hat{\bm{C}}\_q \dot{\bm{q}} + \hat{\bm{G}}\_q \\\\\\ + \bm{a}\_q &= \ddot{\bm{q}}\_d + \bm{K}\_d \dot{\bm{e}}\_q + \bm{K}\_p \bm{e}\_q + \end{aligned} +\end{equation\*} + +by which the tracking error dynamics is summarized as + +\begin{equation\*} + \ddot{\bm{e}}\_q + \bm{K}\_d \dot{\bm{e}}\_q + \bm{K}\_p \bm{e}\_q + \hat{\bm{M}}\_{q}^{-1} \left[ \tilde{\bm{M}}\_q \ddot{\bm{q}} + \tilde{\bm{C}}\_q \dot{\bm{q}} + \tilde{\bm{G}}\_q + \bm{\mathcal{\tau}}\_d \right] = 0 +\end{equation\*} + + +##### Partial Linearization IDC {#partial-linearization-idc} + +To reduce the computational cost of the inverse dynamic control, it is possible to use partial linearization of dynamic formulation, just by gravity compensation, while keeping asymptotic tracking stability of the closed-loop system. 
+In this case, the control input wrench in the task space is simplified to
+
+\begin{equation\*}
+ \bm{\mathcal{F}} = \bm{K}\_d \dot{\bm{e}}\_x + \bm{K}\_p \bm{e}\_x + \hat{\bm{G}}(\bm{\mathcal{X}})
+\end{equation\*}
+
+The time derivative of the Lyapunov function used to analyze the stability of the tracking dynamics of the closed-loop system is:
+
+\begin{equation\*}
+ \dot{V} = \dot{\bm{\mathcal{X}}}^T \bm{M} \ddot{\bm{\mathcal{X}}} + \frac{1}{2} \dot{\bm{\mathcal{X}}}^T \dot{\bm{M}} \dot{\bm{\mathcal{X}}} + \bm{e}\_x^T \bm{K}\_p \bm{e}\_x
+\end{equation\*}
+
+Stability analysis of the closed-loop system in this case reveals the fact that this simplified version of inverse dynamics control can lead to asymptotic tracking for constant desired trajectories.
+
+
+##### Robust Inverse Dynamics Control {#robust-inverse-dynamics-control}
+
+To accommodate modeling uncertainties in inverse dynamic control, the following robust control scheme in the task space is developed:
+
+\begin{equation\*}
+ \begin{aligned}
+ \bm{\mathcal{F}} &= \hat{\bm{M}}(\bm{\mathcal{X}}) \bm{a}\_r + \hat{\bm{C}}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}})\dot{\bm{\mathcal{X}}} + \hat{\bm{G}}(\bm{\mathcal{X}}) \\\\\\
+ \bm{a}\_r &= \ddot{\bm{\mathcal{X}}}\_d + \bm{K}\_d \dot{\bm{e}}\_x + \bm{K}\_p \bm{e}\_x + \bm{\delta}\_a
+ \end{aligned}
+\end{equation\*}
+
+in which the robustifying corrective term \\(\bm{\delta}\_a\\) is found through a Lyapunov stability analysis of tracking error dynamics.
+The tracking error dynamics can be represented by the following linear and nonlinear components: + +\begin{equation\*} + \bm{A} = \begin{bmatrix} + \bm{0} & \bm{I} \\\\\\ + -\bm{K}\_p & -\bm{K}\_d + \end{bmatrix}, \quad \bm{B} = \begin{bmatrix} + \bm{0} \\ \bm{I} + \end{bmatrix} +\end{equation\*} + +The corrective term \\(\bm{\delta}\_a\\) can be found as + +\begin{equation\*} + \bm{\delta}\_a = \left\\{ \begin{matrix} + - \rho \frac{v}{\\|v\\|} & \text{if} \\|v\\| > \epsilon \\\\\\ + - \rho \frac{v}{\epsilon} & \text{if} \\|v\\| \le \epsilon + \end{matrix} \right. +\end{equation\*} + +in which \\(v\\) is defined by \\(v = \bm{B}^T \bm{P} \bm{\epsilon}\\), where \\(\bm{P}\\) is the solution to the matrix Lyapunov equation and \\(\epsilon\\) is a smoothing threshold. +It is shown that by adding this corrective term to the regular inverse dynamics control, the closed-loop system achieves uniform ultimate bounded tracking errors. + + +##### Adaptive Inverse Dynamics Control {#adaptive-inverse-dynamics-control} + +In the adaptive version of the inverse dynamics control, full feedback linearization is considered through adaptive update of dynamic formulation matrices. 
+The error dynamics in this case is
+
+\begin{equation\*}
+ \dot{\bm{\epsilon}} = \bm{A} \bm{\epsilon} + \bm{B} \bm{\Phi} \tilde{\bm{\theta}}
+\end{equation\*}
+
+in which
+
+\begin{equation\*}
+ \begin{aligned}
+ \bm{A} &= \begin{bmatrix}
+ \bm{0} & \bm{I} \\ -\bm{K}\_p & -\bm{K}\_d
+ \end{bmatrix}, \quad \bm{B} = \begin{bmatrix}
+ \bm{0} \\ \bm{I}
+ \end{bmatrix} \\\\\\
+ \bm{\Phi} &= \hat{\bm{M}}^{-1} \bm{\Upsilon}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}}, \ddot{\bm{\mathcal{X}}})
+ \end{aligned}
+\end{equation\*}
+
+Based on the Lyapunov stability analysis, by using the following Lyapunov function
+
+\begin{equation\*}
+ V = \bm{\epsilon}^T \bm{P} \bm{\epsilon} + \tilde{\bm{\theta}}^T \bm{\Gamma} \tilde{\bm{\theta}}
+\end{equation\*}
+
+the following parameter adaptation law is derived for updates
+
+\begin{equation\*}
+ \dot{\hat{\bm{\theta}}} = - \bm{\Gamma}^{-1} \bm{\Phi}^T \bm{B}^T \bm{P} \bm{\epsilon}
+\end{equation\*}
+
+By this means, the closed-loop system achieves asymptotic tracking performance, while the parameter estimation errors remain bounded.
+
+
+### Motion Control of the Stewart-Gough Platform {#motion-control-of-the-stewart-gough-platform}
+
+
+#### Control in the Task space {#control-in-the-task-space}
+
+For the Stewart-Gough platform, the motion variable in the task space is a six-dimensional vector
+\\[ \bm{\mathcal{X}} = \begin{bmatrix} \bm{x}\_p \\ \bm{\theta} \end{bmatrix} \\]
+with:
+
+- \\(\bm{x}\_p = [x\_p\ y\_p\ z\_p]^T\\) is the position vector of the motion platform center of mass
+- \\(\bm{\theta} = \theta [s\_x\ s\_y\ s\_z]^T = [\theta\_x\ \theta\_y\ \theta\_z]^T\\) is the moving platform orientation expressed by screw coordinates
+
+Therefore, the tracking error is defined as \\(\bm{e} = [e\_x\ e\_y\ e\_z\ e\_{\theta\_x}\ e\_{\theta\_y}\ e\_{\theta\_z}]^T\\).
+
+The decentralized controller consists of six disjoint proportional derivative controllers acting on each error component and is denoted by \\(\bm{K}\_d s + \bm{K}\_p\\).
+
+The output of the controller is denoted by \\(\bm{\mathcal{F}} = [F\_x\ F\_y\ F\_z\ \tau\_x\ \tau\_y\ \tau\_z]^T\\).
+Note that since the output of the controller is defined in the task space, each wrench component directly manipulates the corresponding tracking error component, and therefore, the overall tracking performance of the manipulator is suitable if high controller gains are used.
+ +In practice, the calculated output wrench is transformed into actuator forces through the force distribution block corresponding to the inverse of Jacobian transpose.
+
+
+#### Control in the Joint space {#control-in-the-joint-space}
+
+The joint variable \\(\bm{q}(t)\\) is a six-dimensional vector consisting of the limb lengths denoted by \\(\bm{q} = [l\_1\ l\_2\ l\_3\ l\_4\ l\_5\ l\_6]^T\\).
+Therefore, the tracking error is defined as \\(\bm{e}\_q = \bm{q}\_d - \bm{q}\\), in which the desired motion variable in the joint space, \\(\bm{q}\_d\\), is determined by the solution of inverse kinematics, and \\(\bm{q}\\) is given by direct measurement of the limb lengths.
+ +The decentralized controller, therefore, consists of six disjoint PD controllers acting on each error component. +The output of the controller directly generates the actuator torques denoted by \\(\tau\\).
+
+In simulation, it is observed that the orientation error significantly increases in the joint space control scheme.
+The main reason is that the controller gains directly penalize the position error of the limb lengths, and not the orientation errors, and therefore, there is no direct controller action to be suitably tuned to reduce the orientation error.
+ +Comparing the closed-loop performance of the PD controllers designed in the joint space to those designed in the task space, it can be concluded that **tuning of the PD gains for a suitable performance is much easier in task space designs**. +Furthermore, a very small error signature in the joint space may be accumulated to produce relatively larger tracking errors in the task space. +Hence, it is recommended to design and implement controllers in the task space, if the required motion variables can be directly measured or the forward kinematic solution can be calculated in an online routine. + + +## Force Control {#force-control} + + + + +### Introduction {#introduction} + +In many applications, it may occur that the robot moving platform is in contact with a **stiff** environment and **specific interacting wrench is required**. +In such applications, the contact wrench describes the state of interaction more effectively than the position and orientation of the moving platform. + +
+
+ +The problem of force control can be described as to derive the actuator forces required to generate a prescribed desired wrench at the manipulator moving platform, when the manipulator is carrying out its desired motion. + +
+ +This problem and its extents are treated in the **force control** algorithms described in this chapter. +A force control strategy is one that modifies position trajectories based on the sensed wrench, or force-motion relations.
+
+If a pure **motion control** scheme is used for the manipulator, in case it contacts an environment, the robot does not sense the presence of the environment, and its driving forces become excessively high to reduce the tracking errors.
+In such a case, the robot may break the object it is in contact with, or may break its own internal structure.
+Additional sensors should be included in the manipulator in order for it to be able to feel the interaction and to control the interacting forces.
+Different wrench sensors are developed for such applications, and it is possible to use joint torque or link force measurement units to determine the projection of the interacting forces in the joint space.
+
+The use of wrench sensors either in the task space or in the joint space opens horizons to use different force control topologies for the manipulators.
+Using such sensors does not imply that regular motion sensors used in motion control schemes are not necessary.
+The use of motion sensors and the usual corresponding control topologies are usually necessary, since the motion of the manipulator is one of the outputs to be controlled.
+Depending on the type and configuration of the wrench sensors, different force control topologies are developed.
+ + +### Controller Topology {#controller-topology} + +For a force control scheme, the desired interacting wrench of the moving platform and the environment may be of interest. +This quantity may be denoted by \\(\bm{\mathcal{F}}\_d\\), which has the same dimension and structure of the manipulator wrench \\(\bm{\mathcal{F}}\\). +To carry out such a control task in a closed-loop structure, it is necessary to **measure the output wrench** of the manipulator through an instrumentation system.
+ +Although there are many commercial six-degrees-of-freedom wrench sensors available in the market, they are usually more expensive than single joint force measurement units. +Another alternative for force measurement is **direct measurement of the actuator forces**. +Many commercial linear actuators are available in the market in which **embedded force measurement** is considered in their design. +Therefore, it might be preferable in some applications to use direct actuator force measurements to carry out the feedback control. + + +#### Cascade Control {#cascade-control} + +In a general force control scheme, the **prime objective** is tracking of the interacting wrench between the moving platform and the environment. +However, note that the motion control of the robot when the robot is in interaction with the environment is also another **less-important objective** and when the contact of the robot moving platform is released, motion control becomes the prime objective. + +
+
+ +To follow **two objectives** with different properties in one control system, usually a **hierarchy** of two feedback loops is used in practice. +This kind of control topology is called **cascade control**, which is used when there are **several measurements and one prime control variable**. +Cascade control is implemented by **nesting** the control loops, as shown in Figure [fig:cascade_control](#fig:cascade_control). +The output control loop is called the **primary loop**, while the inner loop is called the secondary loop and is used to fulfill a secondary objective in the closed-loop system. + +
+ + + +{{< figure src="/ox-hugo/taghirad13_cascade_control.png" caption="Figure 27: Block diagram of a closed-loop system with cascade control" >}} + +The measured variables are here the motion and interacting wrench that may be measured in the task space or in the joint space, and therefore, different control topologies may be advised for each set of measurement variables.
+ +To improve the performance of the control system for a particular objective, it is important to **choose the right variables for internal and external feedback loops**, and to **design suitable controllers for each feedback system**. +Although these differ in different topologies described in the following sections, some **general rules** are applied to design a well performing cascade control system. + +A general idea in cascade control design is the **ideal case**, in which the inner loop is designed so tight that the secondary (inner) loop behaves as a **perfect servo**, and responds very quickly to the internal control command. +This idea is effectively used in many applications, wherein a nearly-perfect actuator to respond to the requested commands is designed by using an inner control feedback. + +
+
+ +The design criteria for the inner loop is to have a high control gain such that the time response of the secondary variable is at least 5 times more than that of the primary variable, and such that it can overcome the effect of **disturbances** and **unmodelled dynamics** in the **internal feedback structure**. + +
+ +It is also necessary to have a well-defined relation between the primary and secondary variables, to have harmony in the objectives followed in the primary and secondary loops. + + +#### Force Feedback in Outer Loop {#force-feedback-in-outer-loop} + +Consider the force control schemes, in which **force tracking is the prime objective**. +In such a case, it is advised that the outer loop of cascade control structure is constructed by wrench feedback, while the inner loop is based on position feedback. +Since different types of measurement units may be used in parallel robots, different control topologies may be constructed to implement such a cascade structure.
+ +Consider first the cascade control topology shown in Figure [fig:taghira13_cascade_force_outer_loop](#fig:taghira13_cascade_force_outer_loop) in which the measured variables are both in the **task space**. +The inner loop is constructed by position feedback while the outer loop is based on force feedback. +As seen in Figure [fig:taghira13_cascade_force_outer_loop](#fig:taghira13_cascade_force_outer_loop), the force controller block is fed to the motion controller, and this might be seen as the **generated desired motion trajectory for the inner loop**. + +The output of motion controller is also designed in the task space, and to convert it to implementable actuator force \\(\bm{\tau}\\), the force distribution block is considered in this topology.
+ + + +{{< figure src="/ox-hugo/taghira13_cascade_force_outer_loop.png" caption="Figure 28: Cascade topology of force feedback control: position in inner loop and force in outer loop. Moving platform wrench \\(\bm{\mathcal{F}}\\) and motion variable \\(\bm{\mathcal{X}}\\) are measured in the task space" >}} + +Other alternatives for force control topology may be suggested based on the variations of position and force measurements. +If the force is measured in the joint space, the topology suggested in Figure [fig:taghira13_cascade_force_outer_loop_tau](#fig:taghira13_cascade_force_outer_loop_tau) can be used. +In this topology, the measured actuator force vector \\(\bm{\tau}\\) is mapped into its corresponding wrench in the task space by the Jacobian transpose mapping \\(\bm{\mathcal{F}} = \bm{J}^T \bm{\tau}\\).
+ + + +{{< figure src="/ox-hugo/taghira13_cascade_force_outer_loop_tau.png" caption="Figure 29: Cascade topology of force feedback control: position in inner loop and force in outer loop. Actuator forces \\(\bm{\tau}\\) and motion variable \\(\bm{\mathcal{X}}\\) are measured" >}} + +Consider the case where the force and motion variables are both measured in the **joint space**. +Figure [fig:taghira13_cascade_force_outer_loop_tau_q](#fig:taghira13_cascade_force_outer_loop_tau_q) suggests the force control topology in the joint space, in which the inner loop is based on measured motion variable in the joint space, and the outer loop uses the measured actuator force vector. +In this topology, it is advised that the force controller is designed in the **task** space, and the Jacobian transpose mapping is used to project the measured actuator force vector into its corresponding wrench in the task space. +However, as the inner loop is constructed in the joint space, the desired motion variable \\(\bm{\mathcal{X}}\_d\\) is mapped into joint space using **inverse kinematic** solution. + +Therefore, the structure and characteristics of the position controller in this topology is totally different from that given in the first two topologies.
+ + + +{{< figure src="/ox-hugo/taghira13_cascade_force_outer_loop_tau_q.png" caption="Figure 30: Cascade topology of force feedback control: position in inner loop and force in outer loop. Actuator forces \\(\bm{\tau}\\) and joint motion variable \\(\bm{q}\\) are measured in the joint space" >}} + + +#### Force Feedback in Inner Loop {#force-feedback-in-inner-loop} + +Consider the force control scheme in which the **motion-force relation is the prime objective**. +In such a case, force tracking is not the primary objective, and it is advised that the outer loop of cascade control structure consists of a motion control feedback. + +Since different type of measurement units may be used in parallel robots, different control topologies may be constructed to implement such cascade controllers.
+
+Figure [fig:taghira13_cascade_force_inner_loop_F](#fig:taghira13_cascade_force_inner_loop_F) illustrates the cascade control topology for the system in which the measured variables are both in the task space (\\(\bm{\mathcal{F}}\\) and \\(\bm{\mathcal{X}}\\)).
+The inner loop is constructed by force feedback while the outer loop is based on position feedback.
+By this means, when the manipulator is not in contact with a stiff environment, position tracking is guaranteed through the primary controller.
+However, when there is an interacting wrench \\(\bm{\mathcal{F}}\_e\\) applied to the moving platform, this structure controls the force-motion relation.
+This configuration may be seen as if the **outer loop generates a desired force trajectory for the inner loop**.
+ + + +{{< figure src="/ox-hugo/taghira13_cascade_force_inner_loop_F.png" caption="Figure 31: Cascade topology of force feedback control: force in inner loop and position in outer loop. Moving platform wrench \\(\bm{\mathcal{F}}\\) and motion variable \\(\bm{\mathcal{X}}\\) are measured in the task space" >}} + +Other alternatives for control topology may be suggested based on the variations of position and force measurements. +If the force is measured in the joint space, control topology shown in Figure [fig:taghira13_cascade_force_inner_loop_tau](#fig:taghira13_cascade_force_inner_loop_tau) can be used. +In such case, the Jacobian transpose is used to map the actuator force to its corresponding wrench in the task space.
+ + + +{{< figure src="/ox-hugo/taghira13_cascade_force_inner_loop_tau.png" caption="Figure 32: Cascade topology of force feedback control: force in inner loop and position in outer loop. Actuator forces \\(\bm{\tau}\\) and motion variable \\(\bm{\mathcal{X}}\\) are measured" >}} + +If the force and motion variables are both measured in the **joint** space, the control topology shown in Figure [fig:taghira13_cascade_force_inner_loop_tau_q](#fig:taghira13_cascade_force_inner_loop_tau_q) is suggested. +The inner loop is based on the measured actuator force vector in the joint space \\(\bm{\tau}\\), and the outer loop is based on the measured actuated joint position vector \\(\bm{q}\\). +In this topology, the desired motion in the task space is mapped into the joint space using **inverse kinematic** solution, and **both the position and force feedback controllers are designed in the joint space**. +Thus, independent controllers for each joint may be suitable for this topology. + + + +{{< figure src="/ox-hugo/taghira13_cascade_force_inner_loop_tau_q.png" caption="Figure 33: Cascade topology of force feedback control: force in inner loop and position in outer loop. 
Actuator forces \\(\bm{\tau}\\) and joint motion variable \\(\bm{q}\\) are measured in the joint space" >}} + + +### Stiffness Control {#stiffness-control} + + +#### Single-Degree-of-Freedom Stiffness Control {#single-degree-of-freedom-stiffness-control} + + +#### General Stiffness Control {#general-stiffness-control} + + +#### Stiffness Contorl of the Stewart-Gough Platform {#stiffness-contorl-of-the-stewart-gough-platform} + + +### Direct Force Control {#direct-force-control} + + + +{{< figure src="/ox-hugo/taghira13_direct_force_control.png" caption="Figure 34: Direct force control scheme, force feedback in the outer loop and motion feedback in the inner loop" >}} + + +### Impedance Control {#impedance-control} + +For the stiffness control and direct force control schemes, it is observed that when the manipulator-moving platform is in contact with a stiff environment, the motion variable \\(\bm{\mathcal{X}}\\) and the interacting force variable \\(\bm{\mathcal{F}}\\) are two dynamically **dependent** quantities.
+
+In stiffness control, it is aimed to adjust the **static** relation between these two quantities.
+In this scheme, no force measurement is required, however, careful design of the desired motion trajectory and PD controller gains is needed to tune the stiffness property of the interaction at steady state.
+ +In force control schemes, on the other hand, the force tracking is the prime objective, and force measurement is a stringent requirement to implement such schemes. + +The main reason that the motion and force variables are not being gable to be controlled independently is that for an n-degrees-of-freedom manipulator, only n-independent control inputs are available, and therefore, only n-independent variables can be controlled, while the force and motion quantities count to \\(2n\\) independent variables.
+ +
+
+ +The key idea behind **impedance control** schemes, is to **tune the dynamic relation between the force and the motion variables**, and not a hierarchy of tracking objectives in force and in position variables. +In this scheme, contrary to stiffness control schemes, **both force and position variables are measured** and used in the control structure. + +
+ +The definition of mechanical **impedance** is given in an analogy of the well-known electrical impedance definition as the **relationship between the effort and flow variables**. +Since this relation can be well determined in the frequency domain, the dynamical relation of force and motion variable may be represented by mechanical impedance. +Impedance control schemes provide control topology to tune the mechanical impedance of a system to a desired value. +By this means, the force and the motion variables are not controlled independently, or in a hierarchy, but their dynamic relation represented by mechanical impedance is suitably controlled. + + +#### Impedance {#impedance} + +Impedance was first defined in electrical networks as the measure of the opposition that an electrical circuit presents to the passage of a current when a voltage is applied. +To **generalize** the impedance definition to other disciplines, voltage is generalized to the **effort** and current is generalized to the **flow**. + +Impedance is a **complex** function defined as the ratio of the Laplace transform of the effort to the Laplace transform of the flow.
+ +Impedance is usually denoted by \\(\bm{Z}(s)\\) and it may be represented by writing its magnitude and phase in the form of \\(\abs{\bm{Z}(s)}\\) and \\(\angle{\bm{Z}(s)}\\). +The magnitude of the complex impedance \\(\abs{\bm{Z}}\\) is the ratio of the effort amplitude to that of the flow, while the phase \\(\angle{\bm{Z}}\\) is the phase shift by which the flow is ahead of the effort. + +
+
+ +Mechanical Impedance is defined as the ratio of the Laplace transform of the mechanical effort to the Laplace transform of the mechanical flow: + +\begin{equation} + \bm{Z}(s) = \frac{\bm{F}(s)}{\bm{v}(s)} +\end{equation} + +in which effort in mechanical systems is represented by force \\(\bm{F}\\) and flow is represented by velocity \\(\bm{v}\\). + +
+ +Note that this definition is given for a single-degree-of-freedom motion system. +The motion can be generalized to angular motion, in which the effort is represented by torque, while the flow is represented by angular velocity. +Furthermore, the impedance may be generalized to multiple-degrees-of-freedom system, in which for a general spatial motion effort is represented by a wrench \\(\bm{\mathcal{F}}\\), while flow is represented by motion twist \\(\dot{\bm{\mathcal{X}}}\\).
+ +Nevertheless, note that Laplace transform is only applicable for **linear time invariant** systems, and for a parallel manipulator the dynamic formulation of which is nonlinear, the concept of mechanical impedance may be extended to the differential equation relating the mechanical wrench \\(\bm{\mathcal{F}}\\) to motion twist \\(\dot{\bm{\mathcal{X}}}\\). + +
+
+ +Consider an RLC circuit depicted in Figure [fig:taghirad13_impedance_control_rlc](#fig:taghirad13_impedance_control_rlc). +The differential equation relating voltage \\(v\\) to the current \\(i\\) is given by +\\[ v = L\frac{di}{dt} + Ri + \int\_0^t \frac{1}{C} i(\tau)d\tau \\] +in which \\(L\\) denote the inductance, \\(R\\) the resistance and \\(C\\) the capacitance. + +The impedance of the system may be found from the Laplace transform of the above equation: + +\begin{equation} + Z(s) = \frac{v(s)}{i(s)} = Ls + R + \frac{1}{Cs} \label{eq:rlc\_impedance} +\end{equation} + +
+ +
+
+ +Consider the mass-spring-damper system depicted in Figure [fig:taghirad13_impedance_control_rlc](#fig:taghirad13_impedance_control_rlc). +The governing dynamic formulation for this system is given by +\\[ m \ddot{x} + c \dot{x} + k x = f \\] +in which \\(m\\) denote the body mass, \\(c\\) the damper viscous coefficient and \\(k\\) the spring stiffness. + +The impedance of the system may be found from the Laplace transform of the above equation: + +\begin{equation} + Z(s) = \frac{f(s)}{v(s)} = ms + c + \frac{k}{s} \label{eq:mass\_spring\_damper\_impedance} +\end{equation} + +
+ + + +{{< figure src="/ox-hugo/taghirad13_impedance_control_rlc.png" caption="Figure 35: Analogy of electrical impedance in (a) an electrical RLC circuit to (b) a mechanical mass-spring-damper system" >}} + +As inferred from the above two examples, although the physical nature of the system may differ from each other, they may be represented by similar impedances. +From this analogy, a terminology for impedance is introduced. + +
+
+ +An impedance \\(\bm{Z}(s)\\) is called + +- **Inductive** if \\(\abs{\bm{Z}(0)} = 0\\) +- **Resistive** if \\(\abs{\bm{Z}(0)} = R\\) +- **Capacitive** if \\(\lim\_{s\to 0} \abs{\bm{K}(s)} = \infty\\) + +
+
+Hence, for the mechanical system represented in Figure [fig:taghirad13_impedance_control_rlc](#fig:taghirad13_impedance_control_rlc):
+
+- mass represents inductive impedance
+- viscous friction represents resistive impedance
+- spring stiffness represents capacitive impedance
+
+The environments that a robot interacts with may be represented by these classified impedance components.
+A very stiff environment may be represented by high-capacitive impedance models, whereas an environment with high structural damping may be represented by high-resistive impedance.
+
+
+#### Impedance Control Concept {#impedance-control-concept}
+
+The key idea behind impedance control schemes is to tune the dynamic relation between force and motion variables.
+Impedance control schemes provide control topology to tune the mechanical impedance of a system toward a desired impedance.
+
+A desired impedance could be adjusted by desired inductive, resistive and capacitive impedances, which forms a desired linear impedance for the closed-loop system as follows:
+\\[ \bm{Z}\_d(s) = \bm{M}\_d s + \bm{C}\_d + \frac{1}{s} \bm{K}\_d \\]
+where \\(\bm{Z}\_d\\) denotes the desired impedance of the closed-loop system, which is composed of the desired inductive impedance \\(\bm{M}\_d\\), desired resistive impedance \\(\bm{C}\_d\\) and desired capacitive impedance \\(\bm{K}\_d\\).
+Impedance control structures may be used to tune the closed-loop impedance of the system suitably to follow such a desired impedance.
+
+
+#### Impedance Control Structure {#impedance-control-structure}
+
+Consider a parallel robot with multiple-degrees-of-freedom interacting with a stiff environment.
+In such a case, the motion of the robot end effector is represented by the motion vector \\(\bm{\mathcal{X}}\\), and the interacting wrench applied to the robot end effector is denoted by \\(\bm{\mathcal{F}}\_e\\).
+It is considered that the interacting wrench is measured in the task space and is used in the inner force feedback loop. +Furthermore, it is considered that the motion variable \\(\bm{\mathcal{X}}\\) is measured and is used in the outer feedback loop.
+ +In the impedance control scheme, **regulation of the motion-force dynamic relation is the prime objective**, and since force tracking is not the primary objective, it is advised to use a cascade control structure with motion control feedback in the outer loop and force feedback in the inner loop. + +Therefore, when the manipulator is not in contact with a stiff environment, position tracking is guaranteed by a primary controller. +However, when there is an interacting wrench \\(\bm{\mathcal{F}}\_e\\) applied to the moving platform, this structure may be designed to control the force-motion dynamic relation.
+ +As a possible impedance control scheme, consider the closed-loop system depicted in Figure [fig:taghira13_impedance_control](#fig:taghira13_impedance_control), in which the position feedback is considered in the outer loop, while force feedback is used in the inner loop. +This structure is advised when a desired impedance relation between the force and motion variables is required that consists of desired inductive, resistive, and capacitive impedances. +As shown in Figure [fig:taghira13_impedance_control](#fig:taghira13_impedance_control), the motion-tracking error is directly determined from motion measurement by \\(\bm{e}\_x = \bm{\mathcal{X}}\_d - \bm{\mathcal{X}}\\) in the outer loop and the motion controller is designed to satisfy the required impedance. + +Moreover, direct force-tracking objective is not assigned in this control scheme, and therefore the desired force trajectory \\(\bm{\mathcal{F}}\_d\\) is absent in this scheme. +However, an auxiliary force trajectory \\(\bm{\mathcal{F}}\_a\\) is generated from the motion control law and is used as the reference for the force tracking. +By this means, no prescribed force trajectory is tracked, while the **motion control scheme would advise a force trajectory for the robot to ensure the desired impedance regulation**.
+ + + +{{< figure src="/ox-hugo/taghira13_impedance_control.png" caption="Figure 36: Impedance control scheme; motion feedback in the outer loop and force feedback in the inner loop" >}} + +The required wrench \\(\bm{\mathcal{F}}\\) in the impedance control scheme, is based on inverse dynamics control and consists of three main parts. +In the inner loop, the force control scheme is based on a feedback linearization part in addition to a mass matrix adjustment, while in the outer loop usually a linear motion controller is considered based on the desired impedance requirements. + +Although many different impedance structures may be considered as the basis of the control law, in Figure [fig:taghira13_impedance_control](#fig:taghira13_impedance_control), a linear impedance relation between the force and motion variables is generated that consists of desired inductive \\(\bm{M}\_d\\), resistive \\(\bm{C}\_d\\) and capacitive impedances \\(\bm{K}\_d\\).
+ +According to Figure [fig:taghira13_impedance_control](#fig:taghira13_impedance_control), the controller output wrench \\(\bm{\mathcal{F}}\\), applied to the manipulator may be formulated as +\\[ \bm{\mathcal{F}} = \hat{\bm{M}} \bm{M}\_d^{-1} \bm{e}\_F + \bm{\mathcal{F}}\_{fl} \\] +with: + +\begin{align\*} + \bm{e}\_F &= \bm{\mathcal{F}}\_a - \bm{\mathcal{F}}\_m \\\\\\ + \bm{\mathcal{F}}\_a &= \bm{M}\_d \ddot{\bm{\mathcal{X}}}\_{d} + \bm{C}\_{d} \dot{\bm{e}}\_{x} + \bm{K}\_{d} \bm{e}\_{x} +\end{align\*} + +\\(\bm{M}\_d\\) denotes the desired inductive impedance, \\(\bm{C}\_d\\) the desired resistive impedance and \\(\bm{K}\_d\\) is desired capacitive impedance matrices. + +The feedback linearizing term is given by: +\\[ \bm{\mathcal{F}}\_{fl} = \hat{\bm{C}}(\bm{\mathcal{X}}, \dot{\bm{\mathcal{X}}}) \dot{\bm{\mathcal{X}}} + \hat{\bm{G}}(\bm{\mathcal{X}}) + \bm{\mathcal{F}}\_m \\] + +If the information on the dynamic matrices is complete, and if the force measurements are noise free (\\(\bm{\mathcal{F}}\_m = \bm{\mathcal{F}}\_e\\)), the closed-loop dynamic formulation simplifies to: +\\[ \bm{M}\_d \ddot{\bm{e}}\_x + \bm{C}\_d \dot{\bm{e}}\_x + \bm{K}\_d \bm{e}\_x = \bm{\mathcal{F}}\_e \\] + +And thus the closed-loop error dynamic equation satisfies a set of second-order systems with the desired impedance coefficients in relation to the interacting force. +In other words, **the control structure guarantees that the force and motion relation follows a desired impedance**. +Therefore, by choosing appropriate impedance matrices, the transient performance of the force-motion relation can be shaped so as to have a fast but stable interaction. +By this means, what is controlled is the dynamic relation between the force and motion variables, and not directly the position. 
+However, if the robot is moving freely in space and has no interaction with the environment, \\(\bm{\mathcal{F}}\_e = 0\\), the closed-loop system will provide a suitable motion tracking thanks to the motion controller in the outer loop.
+ +The impedance control scheme is very popular in practice, wherein tuning the force and motion relation in a robot manipulator interacting with a stiff environment is the prime objective. +However, note that for a good performance, an accurate model of the system is required, and the obtained force and motion dynamics are not robust to modeling uncertainty. + +# Bibliography +Taghirad, H., *Parallel robots : mechanics and control* (2013), Boca Raton, FL: CRC Press. [↩](#1a9683d2a60e031fa9b8ed16f4944c9f) diff --git a/content/paper/_index.md b/content/paper/_index.md new file mode 100644 index 0000000..8410710 --- /dev/null +++ b/content/paper/_index.md @@ -0,0 +1,8 @@ ++++ +title = "Papers" +author = ["Thomas Dehaeze"] +type = "paper" +draft = false ++++ + +Here is the list of papers I took note about. diff --git a/content/paper/alkhatib03_activ_struc_vibrat_contr.md b/content/paper/alkhatib03_activ_struc_vibrat_contr.md new file mode 100644 index 0000000..5d1740f --- /dev/null +++ b/content/paper/alkhatib03_activ_struc_vibrat_contr.md @@ -0,0 +1,230 @@ ++++ +title = "Active structural vibration control: a review" +author = ["Thomas Dehaeze"] +draft = false +tags = ["tag1", "tag2"] +categories = ["cat1", "cat2"] ++++ + +Tags +: + + +Reference +: (Rabih Alkhatib \& Golnaraghi, 2003) + +Author(s) +: Alkhatib, R., & Golnaraghi, M. F. + +Year +: 2003 + + +## Process of designing an active vibration control system {#process-of-designing-an-active-vibration-control-system} + +1. Analyze the structure to be controled +2. Obtain an idealized mathematical model with FEM or experimental modal analysis +3. Reduce the model order is necessary +4. Analyze the resulting model: dynamics properties, types of disturbances, ... +5. Quantify sensors and actuators requirements. Decide on their types and location +6. Analyze the impact of the sensors and actuators on the overall dynamic characteristics +7. Specify performance criteria and stability tradeoffs +8. 
Decide on the type of control algorithm to be employed and design a controller to meet the specifications +9. Simulate the resulting controlled system on a computer +10. If the controller does not meet the requirements, adjust the specifications or modify the type of controller +11. Choose hardware and software and integrate the components on a pilot plant +12. Formulate experiments and perform system identification and model updating +13. Implement controller and carry out system test to evaluate the performance + + +## Feedback control {#feedback-control} + + +### Active damping {#active-damping} + +The objective is to reduce the resonance peaks of the closed loop transfer function. + +\\[T(s) = \frac{G(s)H(s)}{1+G(s)H(s)}\\] + +Then \\(T(s) \approx G(s)\\) except near the resonance peaks where the amplitude is reduced. + +This method can be realized without a model of the structure with **guaranteed stability**, granted that the actuators and sensors are **collocated**. + + +### Model based feedback {#model-based-feedback} + +Objective: keep a control variable (position, velocity, ...) to a desired value in spite of external disturbances \\(d(s)\\). + +We have \\[\frac{y(s)}{d(s)} = \frac{1}{1+G(s)H(s)}\\] so we need large values of \\(G(s)H(s)\\) in the frequency range where the disturbance has considerable effect. + +To do so, we need a mathematical model of the system, then the control bandwidth and effectiveness are restricted by the accuracy of the model. +Unmodeled structural dynamics may destabilize the system. + + +## Feedforward Control {#feedforward-control} + +We need a signal that is correlated to the disturbance. Then feedforward can improve performance over simple feedback control. + +An adaptive filter manipulates the signal correlated to the disturbance and the output is applied to the system by the actuator. +The filter coefficients are adapted in such a way that an error signal is minimized. 
+The idea is to generate a secondary disturbance, which destructively interferes with the effect of the primary disturbance at the location of the error sensor. +However, there is no guarantee that the global response is also reduced at other locations. + +The method is considered to be a **local technique**, in contrast to feedback which is global. + +Contrary to active damping which can only reduce the vibration near the resonance, **feedforward control can be effective for any frequency**. +The major restriction to the application of feedforward adaptive filtering is the accessibility of a reference signal correlated to the disturbance. + + +
+ Table 1: + Comparison of control strategies +
+ +| Type of control | Advantages | Disadvantages | +|--------------------------------|---------------------------------------------|-----------------------------------------------| +| Active Damping | Simple to implement | Effective only near resonance | +| | Does not require accurate model | | +| | Guaranteed stability (collocated) | | +| Model Based | Global method | Requires accurate model | +| | Attenuate all disturbance within bandwidth | Requires low delay | +| | | Limited bandwidth | +| | | Spillover | +| Feedforward Adaptive filtering | No model is necessary | Error signal required | +| | Robust to change in plant transfer function | Local method: may amplify vibration elsewhere | +| | More effective for narrowband disturbance | Large amount of real-time computation | + + +## Controllability and Observability {#controllability-and-observability} + +Controllability and Observability are two fundamental qualitative properties of dynamic systems. + +A system is said to be **controllable** if every state vector can be transferred to a desired state in finite time by the application of unconstrained control inputs. + +A system is said to be **observable** at time \\(t\_0\\) if for a state \\(z(t\_0)\\), there is a finite time \\(t\_1>t\_0\\) such that the knowledge of the input \\(u(t)\\) and output \\(y(t)\\) from \\(t\_0\\) to \\(t\_1\\) are sufficient to determine the state \\(z(t\_0)\\). + + +## Coordinate Coupling Control {#coordinate-coupling-control} + +Coordinate coupling control (CCC) is an **energy-based method**. + +The idea is to **transfer the vibrations** from a low or undamped oscillatory system (the plant) to a damped system (the controller). + +This can be implemented passively using tuned mass damper. But the key advantage of this technique is that one can replace the physical absorber with a computer model. The coupling terms can then be selected to maximise the energy transfer. 
+ + +## Robust control {#robust-control} + +Robust control concentrates on the **tradeoffs between performance and stability** in the presence of uncertainty in the system model as well as the exogenous inputs to which it is subjected. + +Uncertainty can be divided into four types: + +- parameter errors +- error in model order +- neglected disturbances +- neglected nonlinearities + +The \\(\mathcal{H}\_\infty\\) controller is developed to address uncertainty by systematic means. +A general block diagram of the control system is shown figure [1](#org95c575a). + +A **frequency shaped filter** \\(W(s)\\) coupled to selected inputs and outputs of the plant is included. +The outputs of this frequency shaped filter define the error outputs used to evaluate the system performance and generate the **cost** that will be used in the design process. + + + +{{< figure src="/ox-hugo/alkhatib03_hinf_control.png" caption="Figure 1: Block diagram for robust control" >}} + +The generalized plant \\(G\\) can be partitioned according to the input-output variables. And we have that the transfer function matrix from \\(d\\) to \\(z\\) is: +\\[ H\_{z/d} = G\_{z/d} + G\_{z/u} K (I - G\_{y/u} K)^{-1} G\_{y/d} \\] +This transfer function matrix contains measures of performance and stability robustness. + +The objective of \\(\mathcal{H}\_\infty\\) control is to design an admissible control \\(u(s)=K(s)y(s)\\) such that \\(\\| H\_{z/d} \\|\_\infty\\) is minimum. + + +## Optimal Control {#optimal-control} + +The control \\(u(t)\\) is designed to minimize a cost function \\(J\\), given the initial conditions \\(z(t\_0)\\) and \\(\dot{z}(t\_0)\\) subject to the constraint that: + +\begin{align\*} +\dot{z} &= Az + Bu\\\\\\ +y &= Cz +\end{align\*} + +One such cost function appropriate to a vibration control is +\\[J = 1/2 \int\_{t\_0}^{t\_f} ( z^T Q z + u^T R u ) dt\\] +Where \\(Q\\) and \\(R\\) are positive definite symmetric weighting matrices. 
+ + +## State Observers (Estimators) {#state-observers--estimators} + +It is not always possible to determine the entire state variables. There are usually too many degrees of freedom and only limited measurements. + +The state vector \\(z(t)\\) can be estimated independently of the control problem, and the resulting estimate \\(\hat{z}(t)\\) can be used. + + +## Intelligent Structure and Controller {#intelligent-structure-and-controller} + +Intelligent structure would have the capability to: + +- recognize the present dynamic state of its own structure and evaluate the functional performance of the structure +- identify functional descriptions of external and internal disturbances +- detect changes in structural properties and changes in external and internal disturbances +- predict possible future changes in structural properties +- make intelligent decisions regarding compensations for disturbances and adequately generate actuation forces +- learn from past performance to improve future actions + +Two main methodologies: + +- artificial neural networks +- fuzzy logic + + +## Adaptive Control {#adaptive-control} + +Adaptive control is frequently used to control systems whose parameters are unknown, uncertain, or slowly varying. + +The design of an adaptive controller involves several steps: + +- selection of a controller structure with adjustable parameters +- selection of an adaptation law for adjusting those parameters +- selection of a performance index +- real-time evaluation of the performance with respect to some desired behavior +- real-time plant identification and model updating +- real-time adjustment of the controller parameters to bring the performance closer to the desired behavior + +It essentially consists of a real-time system identification technique integrated with a control algorithm. + +Two different methods: + +- **Direct method**: the controller parameters are adjusted directly based on the error between the measured and desired outputs. 
+- **Indirect method**: the computations are divided into two consecutive phases. First, the plant model is estimated in real time. Second, the controller parameters are modified based on the most recent updated plant parameters. + + +## Active Control Effects on the System {#active-control-effects-on-the-system} + + + +{{< figure src="/ox-hugo/alkhatib03_1dof_control.png" caption="Figure 2: 1 DoF control of a spring-mass-damping system" >}} + +Consider the control system figure [2](#org7c357dd), the equation of motion of the system is: +\\[ m\ddot{x} + c\dot{x} + kx = f\_a + f \\] + +The controller force can be expressed as: \\(f\_a = -g\_a \ddot{x} - g\_v \dot{x} - g\_d x\\). The equation of motion becomes: +\\[ (m+g\_a)\ddot{x} + (c+g\_v)\dot{x} + (k+g\_d)x = f \\] + +Depending on the type of signal used, the active control adds/subtracts mass, damping and stiffness. + + +## Time Delays {#time-delays} + +One of the limits to the performance of active control is the time delay in controllers and actuators. Time delay introduces phase shift, which deteriorates the controller performance or even causes instability in the system. + + +## Optimal Placement of Actuators {#optimal-placement-of-actuators} + +The problem of optimizing the locations of the actuators can be more significant than the control law itself. + +If the actuator is placed at the wrong location, the system will require a greater force control. In that case, the system is said to have a **low degree of controllability**. + +# Bibliography +Alkhatib, R., & Golnaraghi, M. F., *Active structural vibration control: a review*, The Shock and Vibration Digest, *35(5)*, 367–383 (2003). 
http://dx.doi.org/10.1177/05831024030355002 [↩](#279b5558de3a8131b329a9ba1a99e4f8) diff --git a/content/paper/bibel92_guidel_h.md b/content/paper/bibel92_guidel_h.md new file mode 100644 index 0000000..1ed878d --- /dev/null +++ b/content/paper/bibel92_guidel_h.md @@ -0,0 +1,185 @@ ++++ +title = "Guidelines for the selection of weighting functions for h-infinity control" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [H Infinity Control]({{< relref "h_infinity_control" >}}) + +Reference +: (Bibel \& Malyevac, 1992) + +Author(s) +: Bibel, J. E., & Malyevac, D. S. + +Year +: 1992 + + +## Properties of feedback control {#properties-of-feedback-control} + + + +{{< figure src="/ox-hugo/bibel92_control_diag.png" caption="Figure 1: Control System Diagram" >}} + +From the figure [1](#org82bead2), we have: + +\begin{align\*} +y(s) &= T(s) r(s) + S(s) d(s) - T(s) n(s)\\\\\\ +e(s) &= S(s) r(s) - S(s) d(s) - S(s) n(s)\\\\\\ +u(s) &= S(s)K(s) r(s) - S(s)K(s) d(s) - S(s)K(s) n(s) +\end{align\*} + +With the following definitions + +- \\(L(s) = G(s)K(s)\\) is the **loop transfer matrix** +- \\(S(s) = [I+G(s)K(s)]^{-1}\\) is the **Sensitivity** function matrix +- \\(T(s) = [I+G(s)K(s)]^{-1}G(s)K(s)\\) is the **Transmissibility** function matrix + +
+
+ +\\[ S(s) + T(s) = 1 \\] + +
+ +
+
+ +- **Command following**: \\(S=0\\) and \\(T=1\\) => large gains +- **Disturbance rejection**: \\(S=0\\) => large gains +- **Sensor noise attenuation**: \\(T\\) small where the noise is concentrated +- **Control Sensitivity minimization**: \\(K S\\) small +- **Robustness to modeling errors**: \\(T\\) small in the frequency range of the expected model undertainties + +
+ + +## SISO tradeoff {#siso-tradeoff} + +We want \\(S\\) small for command following and disturbance rejection. +We want \\(T\\) small to remain insensitive to sensor noise and modeling errors and to reduce control sensitivity. + +However we cannot keep both \\(S\\) and \\(T\\) small as \\(S(s)+T(s)=1\\). + +We must determine some **tradeoff** between the sensitivity and the complementary sensitivity functions. + +Usually, reference signals and disturbances occur at low frequencies, while noise and modeling errors are concentrated at high frequencies. The tradeoff, in a SISO sense, is to make \\(|S(j\omega)|\\) small as low frequencies and \\(|T(j\omega)|\\) small at high frequencies. + + +## \\(H\_\infty\\) and weighting functions {#h-infty--and-weighting-functions} + +
+
+ +\\(\mathcal{H}\_\infty\\) control is a design technique with a state-space computation solution that utilizes frequency-dependent weighting functions to tune the controller's performance and robustness characteristics. + +
+ + + +{{< figure src="/ox-hugo/bibel92_general_plant.png" caption="Figure 2: \\(\mathcal{H}\_\infty\\) control framework" >}} + +New design framework (figure [2](#org71ea720)): \\(P(s)\\) is the **generalized plant** transfer function matrix: + +- \\(w\\): exogenous inputs +- \\(z\\): regulated performance output +- \\(u\\): control inputs +- \\(y\\): measured output variables + +The plant \\(P\\) has two inputs and two outputs, it can be decomposed into four sub-transfer function matrices: +\\[P = \begin{bmatrix}P\_{11} & P\_{12} \\ P\_{21} & P\_{22} \end{bmatrix}\\] + + +## Lower Linear Fractional Transformation {#lower-linear-fractional-transformation} + +The transformation from the input \\(w\\) to the output \\(z\\), \\(T\_{zw}\\) is called the **Lower Linear Fractional Transformation** \\(F\_l (P, K)\\). + +
+
+ +\\[T\_{zw} = F\_l (P, K) = P\_{11} + P\_{12}K (I-P\_{22}K)^{-1} P\_{21}\\] + +
+ +The \\(H\_\infty\\) control problem is to find a controller that minimizes \\(\\| T\_{zw} \\|\_\infty\\) over the space of all realizable controllers \\(K(s)\\) that stabilize the closed-loop system. + + +## Weights for inputs/outputs signals {#weights-for-inputs-outputs-signals} + +Since \\(S\\) and \\(T\\) cannot be minimized together at all frequency, **weights are introduced to shape the solutions**. Not only can \\(S\\) and \\(T\\) be weighted, but other regulated performance variables and inputs (figure [3](#org549c59f)). + + + +{{< figure src="/ox-hugo/bibel92_hinf_weights.png" caption="Figure 3: Input and Output weights in \\(\mathcal{H}\_\infty\\) framework" >}} + +The weights on the input and output variables are selected to reflect the spatial and **frequency dependence** of the respective signals and performance specifications. + +These inputs and output weighting functions are defined as rational, stable and **minimum-phase transfer function** (no poles or zero in the right half plane). + + +## General Guidelines for Weight Selection: \\(W\_S\\) {#general-guidelines-for-weight-selection--w-s} + +\\(W\_S\\) is selected to reflect the desired **performance characteristics**. +The sensitivity function \\(S\\) should have low gain at low frequency for good tracking performance and high gain at high frequencies to limit overshoot. +We have to select \\(W\_S\\) such that \\({W\_S}^{-1}\\) reflects the desired shape of \\(S\\). + +
+
+ +- **Low frequency gain**: set to the inverse of the desired steady state tracking error +- **High frequency gain**: set to limit overshoot (\\(0.1\\) to \\(0.5\\) is a good compromise between overshoot and response speed) +- **Crossover frequency**: chosen to limit the maximum closed-loop time constant (\\(\omega\_c \approx 1/\tau\\)) + +
+ + +## General Guidelines for Weight Selection: \\(W\_T\\) {#general-guidelines-for-weight-selection--w-t} + +We want \\(T\\) near unity for good tracking of reference and near zero for noise suppression. + +
+
+ +A high pass weight is usually used on \\(T\\) because the noise energy is mostly concentrated at high frequencies. It should have the following characteristics: + +- The **crossover frequency** is chosen to **limit the closed-loop bandwidth** +- The **high frequency gain** is set high to provide **sensor noise rejection** and high frequency gain attenuation + +
+ +When using both \\(W\_S\\) and \\(W\_T\\), it is important to make sure that the magnitude of these weights at the crossover frequency is less than one to not violate \\(S+T=1\\). + + +## Unmodeled dynamics weighting function {#unmodeled-dynamics-weighting-function} + +Another method of limiting the controller bandwidth and providing high frequency gain attenuation is to use a high pass weight on an **unmodeled dynamics uncertainty block** that may be added from the plant input to the plant output (figure [4](#org379d5b1)). + + + +{{< figure src="/ox-hugo/bibel92_unmodeled_dynamics.png" caption="Figure 4: Unmodeled dynamics model" >}} + +The weight is chosen to cover the expected worst case magnitude of the unmodeled dynamics. A typical unmodeled dynamics weighting function is shown figure [5](#orgcc65489). + + + +{{< figure src="/ox-hugo/bibel92_weight_dynamics.png" caption="Figure 5: Example of unmodeled dynamics weight" >}} + + +## Inputs and Output weighting function {#inputs-and-output-weighting-function} + +It is possible to **weight the control input and actuator rate**. +This is used to **prevent actuator saturation** and **limit amplification of sensor noise signals** on the control input signal. + +Typically actuator input weights are constant over frequency and set at the inverse of the saturation limit. + + +## Order of the weighting functions {#order-of-the-weighting-functions} + +**The order of the optimal controller is equal to the order of the nominal plant model plus the order of the weights**. The complexity of the controller is increased as the order of the weights increases. + +**The order of the weights should be kept reasonably low** to reduce the order of the resulting optimal compensator and avoid potential convergence problems in the DK iterations. + +# Bibliography +Bibel, J. E., & Malyevac, D. S., *Guidelines for the selection of weighting functions for h-infinity control* (1992). 
[↩](#5b41da575e27e6e86f1a1410a0170836) diff --git a/content/paper/butler11_posit_contr_lithog_equip.md b/content/paper/butler11_posit_contr_lithog_equip.md new file mode 100644 index 0000000..7d3c6f8 --- /dev/null +++ b/content/paper/butler11_posit_contr_lithog_equip.md @@ -0,0 +1,20 @@ ++++ +title = "Position control in lithographic equipment" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Multivariable Control]({{< relref "multivariable_control" >}}), [Positioning Stations]({{< relref "positioning_stations" >}}) + +Reference +: (Hans Butler, 2011) + +Author(s) +: Butler, H. + +Year +: 2011 + +# Bibliography +Butler, H., *Position control in lithographic equipment*, IEEE Control Systems, *31(5)*, 28–47 (2011). http://dx.doi.org/10.1109/mcs.2011.941882 [↩](#6a014e3a2ee3e41d20bd0644654c56f0) diff --git a/content/paper/chen00_ident_decoup_contr_flexur_joint_hexap.md b/content/paper/chen00_ident_decoup_contr_flexur_joint_hexap.md new file mode 100644 index 0000000..ad7ed55 --- /dev/null +++ b/content/paper/chen00_ident_decoup_contr_flexur_joint_hexap.md @@ -0,0 +1,22 @@ ++++ +title = "Identification and decoupling control of flexure jointed hexapods" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Flexible Joints]({{< relref "flexible_joints" >}}) + +Reference +: (Yixin Chen \& McInroy, 2000) + +Author(s) +: Chen, Y., & McInroy, J. + +Year +: 2000 + +# Bibliography +Chen, Y., & McInroy, J., *Identification and decoupling control of flexure jointed hexapods*, In , Proceedings 2000 ICRA. Millennium Conference. IEEE International Conference on Robotics and Automation. Symposia Proceedings (Cat. No.00CH37065) (pp. ) (2000). : . 
[↩](#ba05ff213f8e5963d91559d95becfbdb) diff --git a/content/paper/collette11_review_activ_vibrat_isolat_strat.md b/content/paper/collette11_review_activ_vibrat_isolat_strat.md new file mode 100644 index 0000000..1a61d73 --- /dev/null +++ b/content/paper/collette11_review_activ_vibrat_isolat_strat.md @@ -0,0 +1,79 @@ ++++ +title = "Review of active vibration isolation strategies" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Vibration Isolation]({{< relref "vibration_isolation" >}}) + +Reference +: (Christophe Collette {\it et al.}, 2011) + +Author(s) +: Collette, C., Janssens, S., & Artoos, K. + +Year +: 2011 + + +## Background and Motivations {#background-and-motivations} + + +### Passive Isolation Tradeoffs {#passive-isolation-tradeoffs} + +
+
+ +\\[ X(s) = \underbrace{\frac{cs + k}{ms^2 + cs + k}}\_{T\_{wx}(s)} W(s) + \underbrace{\frac{1}{ms^2 + cs + k}}\_{T\_{Fx}(s)} F(s) \\] + +
+ +- \\(T\_{wx}(s)\\) is called the **transmissibility** of the isolator. It characterize the way seismic vibrations \\(w\\) are transmitted to the equipment. +- \\(T\_{Fx}(s)\\) is called the **compliance**. It characterize the capacity of disturbing forces \\(F\\) to create motion \\(x\\) of the equipment. + +In order to minimize the vibrations of a sensitive equipment, a general objective to design a good isolator is to minimize both \\(\abs{T\_{wx}}\\) and \\(\abs{T\_{Fx}}\\) in the frequency range of interest. + +To decrease the amplitude of the overshoot at the resonance frequency, **damping** can be increased. +The price to pay is degradation of the isolation at high frequency (the roll off becomes \\(-1\\) instead of \\(-2\\)). + +**First Trade-off**: Trade-off between damping and isolation. + +To improve the transmissibility, the resonance frequency can be decreased. +However, the systems becomes more sensitive to external force \\(F\\) applied on the equipment. + +**Second trade-off**: Trade-off between isolation and robustness to external force + + +### Active Isolation {#active-isolation} + +We apply a feedback control. +The general expression of the force delivered by the actuator is \\(f = g\_a \ddot{x} + g\_v \dot{x} + g\_p x\\). \\(g\_a\\), \\(g\_v\\) and \\(g\_p\\) are constant gains. + + +
+ Table 1: + Active isolation techniques +
+ +| **Feedback Signal** | **Effect** | **Applications** | +|---------------------|------------------------------------------|------------------| +| Acceleration | Add virtual mass | Few | +| Velocity | Add virtual dashpot connected to the sky | Sky-Hook Damping | +| Position | Add virtual spring connected to the sky | Sky-Hook Spring | + + +## Practical Realizations {#practical-realizations} + + +## Sensor Limitations {#sensor-limitations} + + +## Conclusions {#conclusions} + + + +{{< figure src="/ox-hugo/collette11_comp_isolation_strategies.png" caption="Figure 1: Comparison of Active Vibration Isolation Strategies" >}} + +# Bibliography +Collette, C., Janssens, S., & Artoos, K., *Review of active vibration isolation strategies*, Recent Patents on Mechanical Engineeringe, *4(3)*, 212–219 (2011). http://dx.doi.org/10.2174/2212797611104030212 [↩](#2d69d483f210ca387ca8061596ec27ea) diff --git a/content/paper/collette14_vibrat.md b/content/paper/collette14_vibrat.md new file mode 100644 index 0000000..a207053 --- /dev/null +++ b/content/paper/collette14_vibrat.md @@ -0,0 +1,104 @@ ++++ +title = "Vibration control of flexible structures using fusion of inertial sensors and hyper-stable actuator-sensor pairs" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Vibration Isolation]({{< relref "vibration_isolation" >}}), [Sensor Fusion]({{< relref "sensor_fusion" >}}) + +Reference +: (Collette \& Matichard, 2014) + +Author(s) +: Collette, C., & Matichard, F. + +Year +: 2014 + + +## Introduction {#introduction} + +Sensor fusion is used to combine the benefits of different types of sensors: + +- Relative sensor for DC positioning capability at low frequency +- Inertial sensors for isolation at high frequency +- Force sensor / collocated sensor to improve the robustness + + +## Different types of sensors {#different-types-of-sensors} + +In this paper, three types of sensors are used. Their advantages and disadvantages are summarized table [1](#table--tab:sensors). 
+ +> Several types of sensors can be used for the feedback control of vibration isolation systems: +> +> - Feedback control based on **relative motion sensors** (inductive, capacitive, ferromagnetic sensors...) typically permits to servo-position a system or platform relative to a reference (e.g. floor or support base), but does not provide isolation from the ground motion. +> - Feedback control based on **force sensors** typically lowers the effective natural frequency, and therefore increases the isolation, but sacrifices the systems compliance in doing so. +> - Feedback control based on **inertial sensors** (geophones, seismometers, accelerometers...) improves not only the vibration isolation but also the compliance. Inertial sensors are, however, AC coupled and noisy at low frequencies. + + +
+ Table 1: + Types of sensors +
+ +| Sensors | Advantages | Disadvantages | +|------------------|----------------------------------|---------------------------------------| +| Relative motion | Servo-position | No isolation from ground motion | +| Force sensors | Improve isolation | Increase compliance | +| Inertial sensors | Improve isolation and compliance | AC coupled and noisy at low frequency | + + +## Inertial Control and sensor fusion configurations {#inertial-control-and-sensor-fusion-configurations} + +For a simple 1DoF model, two fusion-sensor configurations are studied. The results are summarized in Table [2](#table--tab:fusion-trade-off). + + +
+ Table 2: + Sensor fusion configurations +
+ +| Low freq. sensor | High freq. sensor | Transmissibility | Compliance | Trade-off | +|------------------|-------------------|------------------|------------|----------------------------------------------------| +| Inertial | Force sensor | Unchanged | Degraded | Sensor noise filtering / compliance degradation | +| Inertial | Relative sensor | Degraded | Unchanged | Isolation in the bandwidth / amplification outside | + + +## Flexible structure {#flexible-structure} + +Flexibility is added between the inertial sensor and the actuator. +Now the sensor and actuator are not collocated anymore and the system is unstable because there is no zero between the two poles. +We use sensor fusion to obtain stability at high frequency. + + +### Inertial and small accelerometer {#inertial-and-small-accelerometer} + +The idea is to use a small accelerometer which is easier to locate near the actuator at high frequency. +However, it is important to verify that the noise introduced by the accelerometer does not degrades too much the isolation performance. + + +### Inertial and force sensor {#inertial-and-force-sensor} + +Here the advantage is that the deformation mode is almost not present in the open-loop transfer function. +This simplifies the loop shaping of the controller. + + +### Inertial and relative sensor {#inertial-and-relative-sensor} + +The relative sensor introduces coupling between both side of the actuator which induces degradation of the isolation at high frequency. However, the compliance remains unchanged at high frequency. + + +## Conclusion {#conclusion} + +Fusion of inertial instruments with sensors collocated with the actuator permits to increase the feedback control bandwidth of active isolation systems. + +Three types of sensors have been considered for the high frequency part of the fusion: + +- The fusion with a **relative sensor** improves the stability but compromises the transmissibility. 
It can be of interest for stiff suspensions where high frequency isolation can be sacrificed to improve stability. +- The fusion with an **accelerometer** is used to increase the loop gain. However, as the accelerometer is not dual with the actuator, stability is not guaranteed when the isolation stage is mounted on a flexible support. +- The fusion with a **force sensor** can be used to increase the loop gain with little effect on the compliance and passive isolation, provided that the blend is possible and that no active damping of flexible modes is required. + +# Bibliography +Collette, C., & Matichard, F., *Vibration control of flexible structures using fusion of inertial sensors and hyper-stable actuator-sensor pairs*, In , International Conference on Noise and Vibration Engineering (ISMA2014) (pp. ) (2014). : . [↩](#1223611da2f9b157af97503a4fec7631) diff --git a/content/paper/collette15_sensor_fusion_method_high_perfor.md b/content/paper/collette15_sensor_fusion_method_high_perfor.md new file mode 100644 index 0000000..d0a64ad --- /dev/null +++ b/content/paper/collette15_sensor_fusion_method_high_perfor.md @@ -0,0 +1,28 @@ ++++ +title = "Sensor fusion methods for high performance active vibration isolation systems" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Sensor Fusion]({{< relref "sensor_fusion" >}}), [Vibration Isolation]({{< relref "vibration_isolation" >}}) + +Reference +: (Collette \& Matichard, 2015) + +Author(s) +: Collette, C., & Matichard, F. + +Year +: 2015 + +In order to have good stability margins, it is common practice to collocate sensors and actuators. This ensures alternating poles and zeros along the imaginary axis. Then, each phase lag introduced by the poles is compensated by the phase lead introduced by the zeros. This guarantees stability and such a system is referred to as **hyperstable**. 
+ +In this paper, we study and compare different sensor fusion methods combining inertial sensors at low frequency with sensors adding stability at high frequency. +The stability margins of the controller can be significantly increased with no or little effect on the low-frequency active isolation, provided that the two following conditions are fulfilled: + +- the high frequency sensor and the actuator are dual +- there exists a bandwidth where we can superimpose the open loop transfer functions obtained with the two sensors. + +# Bibliography +Collette, C., & Matichard, F., *Sensor fusion methods for high performance active vibration isolation systems*, Journal of Sound and Vibration, *342(nil)*, 1–21 (2015). http://dx.doi.org/10.1016/j.jsv.2015.01.006 [↩](#7772841a8f05142ec30f0f6daae20932) diff --git a/content/paper/dasgupta00_stewar_platf_manip.md b/content/paper/dasgupta00_stewar_platf_manip.md new file mode 100644 index 0000000..214cb72 --- /dev/null +++ b/content/paper/dasgupta00_stewar_platf_manip.md @@ -0,0 +1,37 @@ ++++ +title = "The stewart platform manipulator: a review" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}) + +Reference +: (Bhaskar Dasgupta \& Mruthyunjaya, 2000) + +Author(s) +: Dasgupta, B., & Mruthyunjaya, T. + +Year +: 2000 + + +
+ Table 1: + Parallel VS serial manipulators +
+ +| | **Advantages** | **Disadvantages** | +|--------------|---------------------------|-----------------------| +| **Serial** | Manoeuverability | Poor precision | +| | Large workspace | Bends under high load | +| | | Vibrate at high speed | +| **Parallel** | High stiffness | Small workspace | +| | Good dynamic performances | | +| | Precise positioning | | + +The generalized Stewart platform consists of two rigid bodies (referred to as the base and the platform) connected through six extensible legs, each with spherical joints at both ends. + +# Bibliography +Dasgupta, B., & Mruthyunjaya, T., *The stewart platform manipulator: a review*, Mechanism and Machine Theory, *35(1)*, 15–40 (2000). http://dx.doi.org/10.1016/s0094-114x(99)00006-3 [↩](#ad17e03f0fbbcc1a070557d7b5a0e1e1) diff --git a/content/paper/devasia07_survey_contr_issues_nanop.md b/content/paper/devasia07_survey_contr_issues_nanop.md new file mode 100644 index 0000000..11e3965 --- /dev/null +++ b/content/paper/devasia07_survey_contr_issues_nanop.md @@ -0,0 +1,30 @@ ++++ +title = "A survey of control issues in nanopositioning" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + + +Reference +: (Devasia {\it et al.}, 2007) + +Author(s) +: Devasia, S., Eleftheriou, E., & Moheimani, S. R. + +Year +: 2007 + +- Talks about Scanning Tunneling Microscope (STM) and Scanning Probe Microscope (SPM) +- Piezoelectric actuators: Creep, Hysteresis, Vibrations, Modeling errors +- Interesting analysis about Bandwidth-Precision-Range tradeoffs +- Control approaches for piezoelectric actuators: feedforward, Feedback, Iterative, Sensorless controls + + + +{{< figure src="/ox-hugo/devasia07_piezoelectric_tradeoff.png" caption="Figure 1: Tradeoffs between bandwidth, precision and range" >}} + +# Bibliography +Devasia, S., Eleftheriou, E., & Moheimani, S. R., *A survey of control issues in nanopositioning*, IEEE Transactions on Control Systems Technology, *15(5)*, 802–823 (2007). 
[↩](#8ce53b8a612ce8ae3eb616cd1ed05630) diff --git a/content/paper/fleming10_nanop_system_with_force_feedb.md b/content/paper/fleming10_nanop_system_with_force_feedb.md new file mode 100644 index 0000000..d1837eb --- /dev/null +++ b/content/paper/fleming10_nanop_system_with_force_feedb.md @@ -0,0 +1,122 @@ ++++ +title = "Nanopositioning system with force feedback for high-performance tracking and vibration control" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Sensor Fusion]({{< relref "sensor_fusion" >}}), [Force Sensors]({{< relref "force_sensors" >}}) + +Reference +: (Fleming, 2010) + +Author(s) +: Fleming, A. + +Year +: 2010 + +Summary: + +- The noise generated by a piezoelectric force sensor is much less than a capacitive sensor +- Dynamical model of a piezoelectric stack actuator and piezoelectric force sensor +- Noise of a piezoelectric force sensor +- IFF with a piezoelectric stack actuator and piezoelectric force sensor +- A force sensor is used as a displacement sensor below the frequency of the first zero +- Sensor fusion architecture with a capacitive sensor and a force sensor and using complementary filters +- Virtual sensor fusion architecture (called low-frequency bypass) +- Analog implementation of the control strategies to avoid quantization noise, finite resolution and sampling delay + + +## Model of a multi-layer monolithic piezoelectric stack actuator {#model-of-a-multi-layer-monolithic-piezoelectric-stack-actuator} + + + +{{< figure src="/ox-hugo/fleming10_piezo_model.png" caption="Figure 1: Schematic of a multi-layer monolithic piezoelectric stack actuator model" >}} + +The actuator experiences an internal stress in response to an applied voltage. 
+This stress is represented by the voltage dependent force \\(F\_a\\) and is related to free displacement by +\\[ \Delta L = \frac{F\_a}{k\_a} \\] + +- \\(\Delta L\\) is the change in actuator length in [m] +- \\(k\_a\\) is the actuator stiffness in [N/m] + +The free displacement \\(\Delta L\\) is related to the applied voltage by: +\\[ \Delta L = d\_{33} n V\_a \\] + +- \\(d\_{33}\\) is the piezoelectric strain constant in [m/V] +- \\(n\\) is the number of layers +- \\(V\_a\\) is the applied voltage in [V] + +Combining the two equations, we obtain: +\\[ F\_a = d\_{33} n k\_a V\_a \\] + +The ratio of the developed force to applied voltage is \\(d\_{33} n k\_a\\) in [N/V]. +We denote this constant by \\(g\_a\\) and: +\\[ F\_a = g\_a V\_a, \quad g\_a = d\_{33} n k\_a \\] + + +## Dynamics of a piezoelectric force sensor {#dynamics-of-a-piezoelectric-force-sensor} + +Piezoelectric force sensors provide a high sensitivity and bandwidth with low noise at high frequencies. + +If a **single wafer** of piezoelectric material is sandwiched between the actuator and platform: +\\[ D = d\_{33} T \\] + +- \\(D\\) is the amount of generated charge per unit area in \\([C/m^2]\\) +- \\(T\\) is the stress in \\([N/m^2]\\) +- \\(d\_{33}\\) is the piezoelectric strain constant in \\([m/V] = [C/N]\\) + +The generated charge is then +\\[ q = d\_{33} F\_s \\] + +If an **n-layer** piezoelectric transducer is used as a force sensor, the generated charge is then: +\\[ q = n d\_{33} F\_s \\] + +--- + +We can use a **charge amplifier** to measure the force \\(F\_s\\). + +{{< figure src="/ox-hugo/fleming10_charge_ampl_piezo.png" caption="Figure 2: Electrical model of a piezoelectric force sensor is shown in gray. Developed charge \\(q\\) is proportional to the strain and hence the force experienced by the sensor. 
Op-amp charge amplifier produces an output voltage \\(V\_s\\) equal to \\(-q/C\_s\\)" >}} + +The output voltage \\(V\_s\\) is equal to +\\[ V\_s = -\frac{q}{C\_s} = -\frac{n d\_{33}F\_s}{C\_s} \\] +that is, the scaling between the force and voltage is \\(-\frac{n d\_{33}}{C\_s}\ [V/N]\\) . + +--- + +We can also use a voltage amplifier. +In that case, the generated charge is deposited on the transducer's internal capacitance. + +The open-circuit voltage of a piezoelectric force sensor is: +\\[ V\_s = \frac{n d\_{33} F\_s}{C} \\] + +- \\(C\\) is the transducer capacitance defined by \\(C = n \epsilon\_T A / h\\) in [F] + - \\(A\\) is the area in \\([m^2]\\) + - \\(h\\) is the layer thickness in [m] + - \\(\epsilon\_T\\) is the dielectric permittivity under a constant stress in \\([F/m]\\) + +We obtain +\\[ V\_s = g\_s F\_s, \quad g\_s = \frac{n d\_{33}}{C} \\] + + +## Noise of a piezoelectric force sensor {#noise-of-a-piezoelectric-force-sensor} + +As piezoelectric sensors have a capacitive source impedance, the sensor noise density \\(N\_{V\_s}(\omega)\\) is primarily due to current noise \\(i\_n\\) reacting with the capacitive source impedance: +\\[ N\_{V\_s}(\omega) = i\_n \frac{1}{C \omega} \\] + +- \\(N\_{V\_s}\\) is the measured noise in \\(V/\sqrt{\text{Hz}}\\) +- \\(i\_n\\) is the current noise in \\(A/\sqrt{\text{Hz}}\\) +- \\(C\\) is the capacitance of the piezoelectric in \\(F\\) + +The current noise density of a general purpose LM833 FET-input op-amp is \\(0.5\ pA/\sqrt{\text{Hz}}\\). +The capacitance of a piezoelectric stack is typically between \\(1 \mu F\\) and \\(100 \mu F\\). + +# Bibliography +Fleming, A., *Nanopositioning system with force feedback for high-performance tracking and vibration control*, IEEE/ASME Transactions on Mechatronics, *15(3)*, 433–447 (2010). 
http://dx.doi.org/10.1109/tmech.2009.2028422 [↩](#c823f68dd2a72b9667a61b3c046b4731) + + +## Backlinks {#backlinks} + +- [Actuators]({{< relref "actuators" >}}) +- [Force Sensors]({{< relref "force_sensors" >}}) diff --git a/content/paper/fleming12_estim.md b/content/paper/fleming12_estim.md new file mode 100644 index 0000000..3b3d2d1 --- /dev/null +++ b/content/paper/fleming12_estim.md @@ -0,0 +1,22 @@ ++++ +title = "Estimating the resolution of nanopositioning systems from frequency domain data" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + + +Reference +: (Andrew Fleming, 2012) + +Author(s) +: Fleming, A. J. + +Year +: 2012 + +# Bibliography +Fleming, A. J., *Estimating the resolution of nanopositioning systems from frequency domain data*, In , 2012 IEEE International Conference on Robotics and Automation (pp. ) (2012). : . [↩](#a1cc9b70316a7dda2f652efd146caf84) diff --git a/content/paper/fleming13_review_nanom_resol_posit_sensor.md b/content/paper/fleming13_review_nanom_resol_posit_sensor.md new file mode 100644 index 0000000..e19aef2 --- /dev/null +++ b/content/paper/fleming13_review_nanom_resol_posit_sensor.md @@ -0,0 +1,185 @@ ++++ +title = "A review of nanometer resolution position sensors: operation and performance" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Position Sensors]({{< relref "position_sensors" >}}) + +Reference +: (Andrew Fleming, 2013) + +Author(s) +: Fleming, A. J. + +Year +: 2013 + +- Define concise performance metric and provide expressions for errors sources (non-linearity, drift, noise) +- Review current position sensor technologies and compare their performance + + +## Sensor Characteristics {#sensor-characteristics} + + +### Calibration and nonlinearity {#calibration-and-nonlinearity} + +Usually quoted as a percentage of the fill-scale range (FSR): + +\begin{equation} + \text{mapping error (\%)} = \pm 100 \frac{\max{}|e\_m(v)|}{\text{FSR}} +\end{equation} + +With \\(e\_m(v)\\) is the mapping error. 
+ + + +{{< figure src="/ox-hugo/fleming13_mapping_error.png" caption="Figure 1: The actual position versus the output voltage of a position sensor. The calibration function \\(f\_{cal}(v)\\) is an approximation of the sensor mapping function \\(f\_a(v)\\) where \\(v\\) is the voltage resulting from a displacement \\(x\\). \\(e\_m(v)\\) is the residual error." >}} + + +### Drift and Stability {#drift-and-stability} + +If the shape of the mapping function actually varies with time, the maximum error due to drift must be evaluated by finding the worst-case mapping error. + + + +{{< figure src="/ox-hugo/fleming13_drift_stability.png" caption="Figure 2: The worst case range of a linear mapping function \\(f\_a(v)\\) for a given error in sensitivity and offset." >}} + + +### Bandwidth {#bandwidth} + +The bandwidth of a position sensor is the frequency at which the magnitude of the transfer function \\(P(s) = v(s)/x(s)\\) drops by \\(3\,dB\\). + +Although the bandwidth specification is useful for predicting the resolution of sensor, it reveals very little about the measurement errors caused by sensor dynamics. + +The frequency domain position error is + +\begin{equation} + \begin{aligned} + e\_{bw}(s) &= x(s) - v(s) \\\\\\ + &= x(s) (1 - P(s)) + \end{aligned} +\end{equation} + +If the actual position is a sinewave of peak amplitude \\(A = \text{FSR}/2\\): + +\begin{equation} + \begin{aligned} + e\_{bw} &= \pm \frac{\text{FSR}}{2} |1 - P(s)| \\\\\\ + &\approx \pm A n \frac{f}{f\_c} + \end{aligned} +\end{equation} + +with \\(n\\) is the low pass filter order corresponding to the sensor dynamics and \\(f\_c\\) is the measurement bandwidth. + +Thus, the sensor bandwidth must be significantly higher than the operating frequency if dynamic errors are to be avoided. + + +### Noise {#noise} + +In addition to the actual position signal, all sensors produce some additive measurement noise. 
+In many types of sensors, the majority of noise arises from the thermal noise in resistors and the voltage and current noise in conditioning circuit transistors. +These noise processes can usually be approximated by a Gaussian random process.
+ +A Gaussian random process is usually described by its autocorrelation function or its Power Spectral Density. + +The autocorrelation function of a random process \\(\mathcal{X}\\) is + +\begin{equation} + R\_{\mathcal{X}}(\tau) = E[\mathcal{X}(t)\mathcal{X}(t + \tau)] +\end{equation} + +where \\(E\\) is the expected value operator. + +The variance of the process is equal to \\(R\_\mathcal{X}(0)\\) and is the expected value of the varying part squared: + +\begin{equation} + \text{Var} \mathcal{X} = E \left[ (\mathcal{X} - E[\mathcal{X}])^2 \right] +\end{equation} + +The standard deviation \\(\sigma\\) is the square root of the variance: + +\begin{equation} + \sigma\_\mathcal{X} = \sqrt{\text{Var} \mathcal{X}} +\end{equation} + +The standard deviation is also the Root Mean Square (RMS) value of a zero-mean random process. + +The Power Spectral Density \\(S\_\mathcal{X}(f)\\) of a random process represents the distribution of power (or variance) across frequency \\(f\\). + +For example, if the random process under consideration was measured in volts, the power spectral density would have the units of \\(V^2/\text{Hz}\\). + +The Power Spectral Density can be obtained from the autocorrelation function from the Wiener-Khinchin relation: + +\begin{equation} + S\_{\mathcal{X}} = 2 \mathcal{F}\\{ R\_\mathcal{X}(\tau) \\} = 2 \int\_{-\infty}^{\infty} R\_\mathcal{X}(\tau) e^{-2j\pi f \tau} d\tau +\end{equation} + +If the power Spectral Density is known, the variance of the generating process can be found from the area under the curve: + +\begin{equation} + \sigma\_\mathcal{X}^2 = E[\mathcal{X}^2(t)] = R\_\mathcal{X}(0) = \int\_0^\infty S\_\mathcal{X}(f) df +\end{equation} + +Rather than plotting the frequency distribution of power, it is often convenient to plot the frequency distribution of the standard deviation, which is referred to as the spectral density. 
+It is related to the power spectral density by a square root: + +\begin{equation} + \text{spectral density} = \sqrt{S\_\mathcal{X}(f)} +\end{equation} + +The units of \\(\sqrt{S\_\mathcal{X}(f)}\\) are \\(\text{units}/\sqrt{Hz}\\). + +The spectral density is preferred in the electronics literature, as the RMS value of a noise process can be determined directly from the noise density and effective bandwidth. + + +### Resolution {#resolution} + +The random noise of a position sensor causes an uncertainty in the measured position. +If the distance between two measured locations is smaller than the uncertainty, it is possible to mistake one point for the other. + +To characterize the resolution, we use the probability that the measured value is within a certain error bound. + +If the measurement noise is approximately Gaussian, the resolution can be quantified by the standard deviation \\(\sigma\\) (RMS value). + +The empirical rule states that there is a \\(99.7\%\\) probability that a sample of a Gaussian random process lies within \\(\pm 3 \sigma\\). +Thus, if we define the resolution as \\(\delta = 6 \sigma\\), it will be referred to as the \\(6\sigma\text{-resolution}\\). + +Another important parameter that must be specified when quoting resolution is the sensor bandwidth. +There is usually a trade-off between bandwidth and resolution (figure [3](#orgd8c6776)). + + + +{{< figure src="/ox-hugo/fleming13_tradeoff_res_bandwidth.png" caption="Figure 3: The resolution versus bandwidth of a position sensor." >}} + +Many types of sensors have a limited full-scale-range (FSR) and tend to have an approximately proportional relationship between the resolution and range. +As a result, it is convenient to consider the ratio of resolution to the FSR, or equivalently, the dynamic range (DNR). 
+A convenient method for reporting this ratio is in parts-per-million (ppm): + +\begin{equation} + \text{DNR}\_{\text{ppm}} = 10^6 \frac{6\sigma\text{-resolution}}{\text{full scale range}} +\end{equation} + + +## Comparison and summary {#comparison-and-summary} + + +
+ Table 1: + Summary of position sensor characteristics. The dynamic range (DNR) and resolution are approximations based on a full-scale range of \(100\,\mu m\) and a first order bandwidth of \(1\,kHz\) +
+ +| Sensor Type | Range | DNR | Resolution | Max. BW | Accuracy | +|----------------|----------------------------------|---------|------------|----------|-----------| +| Metal foil | \\(10-500\,\mu m\\) | 230 ppm | 23 nm | 1-10 kHz | 1% FSR | +| Piezoresistive | \\(1-500\,\mu m\\) | 5 ppm | 0.5 nm | >100 kHz | 1% FSR | +| Capacitive | \\(10\,\mu m\\) to \\(10\,mm\\) | 24 ppm | 2.4 nm | 100 kHz | 0.1% FSR | +| Electrothermal | \\(10\,\mu m\\) to \\(1\,mm\\) | 100 ppm | 10 nm | 10 kHz | 1% FSR | +| Eddy current | \\(100\,\mu m\\) to \\(80\,mm\\) | 10 ppm | 1 nm | 40 kHz | 0.1% FSR | +| LVDT | \\(0.5-500\,mm\\) | 10 ppm | 5 nm | 1 kHz | 0.25% FSR | +| Interferometer | Meters | | 0.5 nm | >100kHz | 1 ppm FSR | +| Encoder | Meters | | 6 nm | >100kHz | 5 ppm FSR | + +# Bibliography +Fleming, A. J., *A review of nanometer resolution position sensors: operation and performance*, Sensors and Actuators A: Physical, *190(nil)*, 106–126 (2013). http://dx.doi.org/10.1016/j.sna.2012.10.016 [↩](#3fb5b61524290e36d639a4fac65703d0) diff --git a/content/paper/furqan17_studies_stewar_platf_manip.md b/content/paper/furqan17_studies_stewar_platf_manip.md new file mode 100644 index 0000000..4475802 --- /dev/null +++ b/content/paper/furqan17_studies_stewar_platf_manip.md @@ -0,0 +1,22 @@ ++++ +title = "Studies on stewart platform manipulator: a review" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}) + +Reference +: (Mohd Furqan {\it et al.}, 2017) + +Author(s) +: Furqan, M., Suhaib, M., & Ahmad, N. + +Year +: 2017 + +Lots of references. + +# Bibliography +Furqan, M., Suhaib, M., & Ahmad, N., *Studies on stewart platform manipulator: a review*, Journal of Mechanical Science and Technology, *31(9)*, 4459–4470 (2017). 
http://dx.doi.org/10.1007/s12206-017-0846-1 [↩](#cc10fe9545c7c381cc2b610e8f91a071) diff --git a/content/paper/furutani04_nanom_cuttin_machin_using_stewar.md b/content/paper/furutani04_nanom_cuttin_machin_using_stewar.md new file mode 100644 index 0000000..d130182 --- /dev/null +++ b/content/paper/furutani04_nanom_cuttin_machin_using_stewar.md @@ -0,0 +1,38 @@ ++++ +title = "Nanometre-cutting machine using a stewart-platform parallel mechanism" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Flexible Joints]({{< relref "flexible_joints" >}}) + +Reference +: (Katsushi Furutani {\it et al.}, 2004) + +Author(s) +: Furutani, K., Suzuki, M., & Kudoh, R. + +Year +: 2004 + +- Lever mechanism to amplify the motion of piezoelectric stack actuators +- Use of flexure joints +- Eddy current displacement sensors for control (decentralized) + +{{< figure src="/ox-hugo/furutani04_ctrl_arch.png" >}} + +- Isotropic performance (cubic configuration even if not said so) + +Possible sources of error: + +- position error of the link ends in assembly => simulation of position error and it is not significant +- Inaccurate modelling of the links +- insufficient generative force +- unwanted deformation of the links + +To minimize the errors, a calibration is done between the required leg length and the wanted platform pose. +Then, it is fitted with 4th order polynomial and included in the control architecture. + +# Bibliography +Furutani, K., Suzuki, M., & Kudoh, R., *Nanometre-cutting machine using a stewart-platform parallel mechanism*, Measurement Science and Technology, *15(2)*, 467–474 (2004). 
http://dx.doi.org/10.1088/0957-0233/15/2/022 [↩](#bedab298599c84f60236313ebaad2714) diff --git a/content/paper/gao15_measur_techn_precis_posit.md b/content/paper/gao15_measur_techn_precis_posit.md new file mode 100644 index 0000000..a843ae7 --- /dev/null +++ b/content/paper/gao15_measur_techn_precis_posit.md @@ -0,0 +1,20 @@ ++++ +title = "Measurement technologies for precision positioning" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Position Sensors]({{< relref "position_sensors" >}}) + +Reference +: (Gao {\it et al.}, 2015) + +Author(s) +: Gao, W., Kim, S., Bosse, H., Haitjema, H., Chen, Y., Lu, X., Knapp, W., … + +Year +: 2015 + +# Bibliography +Gao, W., Kim, S., Bosse, H., Haitjema, H., Chen, Y., Lu, X., Knapp, W., …, *Measurement technologies for precision positioning*, CIRP Annals, *64(2)*, 773–796 (2015). http://dx.doi.org/10.1016/j.cirp.2015.05.009 [↩](#b820b918ced36901ea0ad4bf653202c6) diff --git a/content/paper/garg07_implem_chall_multiv_contr.md b/content/paper/garg07_implem_chall_multiv_contr.md new file mode 100644 index 0000000..aad78ce --- /dev/null +++ b/content/paper/garg07_implem_chall_multiv_contr.md @@ -0,0 +1,39 @@ ++++ +title = "Implementation challenges for multivariable control: what you did not learn in school!" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Multivariable Control]({{< relref "multivariable_control" >}}) + +Reference +: (Sanjay Garg, 2007) + +Author(s) +: Garg, S. + +Year +: 2007 + +Discusses: + +- When to use multivariable control and when not to? +- Two major issues with implementing multivariable control: **gain scheduling** and **integrator wind up protection** + +> Inline simple gain and phase margin measured for SISO, "robustness" determination of multivariable control requires complex analyses using **singular value techniques** and **Monte Carlo** simulations. 
+ +**When to use multivariable control**: + +- System has high input/output coupling and not much separation between loop bandwidth +- System is complex with large number of states +- When sequential SISO loop closure will not meet performance requirements + +Importance of having a mechanism to limit the control rate in the synthesis process. +The control rate should be weighted appropriately in order to not saturate the system and stay in the linearity regime. + +- importance of scaling the plant prior to synthesis and also replacing pure integrators with slow poles + +# Bibliography +Garg, S., *Implementation challenges for multivariable control: what you did not learn in school!*, In , AIAA Guidance, Navigation and Control Conference and Exhibit (pp. ) (2007). : . [↩](#07f63c751c1d9fcfe628178688f7ec24) diff --git a/content/paper/geng95_intel_contr_system_multip_degree.md b/content/paper/geng95_intel_contr_system_multip_degree.md new file mode 100644 index 0000000..188495f --- /dev/null +++ b/content/paper/geng95_intel_contr_system_multip_degree.md @@ -0,0 +1,24 @@ ++++ +title = "An intelligent control system for multiple degree-of-freedom vibration isolation" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Vibration Isolation]({{< relref "vibration_isolation" >}}) + +Reference +: (Jason Geng {\it et al.}, 1995) + +Author(s) +: Geng, Z. J., Pan, G. G., Haynes, L. S., Wada, B. K., & Garba, J. A. + +Year +: 1995 + + + +{{< figure src="/ox-hugo/geng95_control_structure.png" caption="Figure 1: Local force feedback and adaptive acceleration feedback for active isolation" >}} + +# Bibliography +Geng, Z. J., Pan, G. G., Haynes, L. S., Wada, B. K., & Garba, J. A., *An intelligent control system for multiple degree-of-freedom vibration isolation*, Journal of Intelligent Material Systems and Structures, *6(6)*, 787–800 (1995). 
http://dx.doi.org/10.1177/1045389x9500600607 [↩](#76af0f5c88615842fa91864c8618fb58) diff --git a/content/paper/hanieh03_activ_stewar.md b/content/paper/hanieh03_activ_stewar.md new file mode 100644 index 0000000..208cb46 --- /dev/null +++ b/content/paper/hanieh03_activ_stewar.md @@ -0,0 +1,34 @@ ++++ +title = "Active isolation and damping of vibrations via stewart platform" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Vibration Isolation]({{< relref "vibration_isolation" >}}), [Active Damping]({{< relref "active_damping" >}}) + +Reference +: @phdthesis{hanieh03_activ_stewar, + author = {Hanieh, Ahmed Abu}, + school = {Universit{\'e} Libre de Bruxelles, Brussels, Belgium}, + title = {Active isolation and damping of vibrations via Stewart + platform}, + year = 2003, + tags = {parallel robot}, + } + +Author(s) +: Hanieh, A. A. + +Year +: 2003 + +# Bibliography +Hanieh, A. A., *Active isolation and damping of vibrations via stewart platform* (Doctoral dissertation) (2003). Universit{\'e} Libre de Bruxelles, Brussels, Belgium, . [↩](#10e535e895bdcd6b921bff33ef68cd81) diff --git a/content/paper/hauge04_sensor_contr_space_based_six.md b/content/paper/hauge04_sensor_contr_space_based_six.md new file mode 100644 index 0000000..5fb26e1 --- /dev/null +++ b/content/paper/hauge04_sensor_contr_space_based_six.md @@ -0,0 +1,144 @@ ++++ +title = "Sensors and control of a space-based six-axis vibration isolation system" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Vibration Isolation]({{< relref "vibration_isolation" >}}), [Cubic Architecture]({{< relref "cubic_architecture" >}}) + +Reference +: (Hauge \& Campbell, 2004) + +Author(s) +: Hauge, G., & Campbell, M. 
+ +Year +: 2004 + +**Discusses**: + +- Choice of sensors and control architecture +- Predictability and limitations of the system dynamics +- Two-Sensor control architecture +- Vibration isolation using a Stewart platform +- Experimental comparison of Force sensor and Inertial Sensor and associated control architecture for vibration isolation + + + +{{< figure src="/ox-hugo/hauge04_stewart_platform.png" caption="Figure 1: Hexapod for active vibration isolation" >}} + +**Stewart platform** (Figure [1](#org666133a)): + +- Low corner frequency +- Large actuator stroke (\\(\pm5mm\\)) +- Sensors in each strut (Figure [2](#org4d96564)): + - three-axis load cell + - base and payload geophone in parallel with the struts + - LVDT + + + +{{< figure src="/ox-hugo/hauge05_struts.png" caption="Figure 2: Strut" >}} + +> Force sensors typically work well because they are not as sensitive to payload and base dynamics, but are limited in performance by a low-frequency zero pair resulting from the cross-axial stiffness. + +**Performance Objective** (frequency domain metric): + +- The transmissibility should be close to 1 between 0-1.5Hz + \\(-3dB < |T(\omega)| < 3db\\) +- The transmissibility should be below -20dB in the 5-20Hz range + \\(|T(\omega)| < -20db\\) + +With \\(|T(\omega)|\\) is the Frobenius norm of the transmissibility matrix and is used to obtain a scalar performance metric. 
+ +**Challenge**: + +- small frequency separation between the two requirements + +**Robustness**: + +- minimization of the transmissibility amplification (Bode's "pop") outside the performance region + +**Model**: + +- single strut axis as the cubic Stewart platform can be decomposed into 6 single-axis systems + + + +{{< figure src="/ox-hugo/hauge04_strut_model.png" caption="Figure 3: Strut model" >}} + +**Zero Pair when using a Force Sensor**: + +- The frequency of the zero pair corresponds to the resonance frequency of the payload mass and the "parasitic" stiffness (sum of the cross-axial, suspension, wiring stiffnesses) +- This zero pair is usually not predictable nor repeatable +- In this Stewart platform, this zero pair uncertainty is due to the internal wiring of the struts + +**Control**: + +- Single-axis controllers => combine them into a full six-axis controller => evaluate the full controller in terms of stability and robustness +- Sensitivity weighted LQG controller (SWLQG) => address robustness in flexible dynamic systems +- Three type of controller: + - Force feedback (cell-based) + - Inertial feedback (geophone-based) + - Combined force/velocity feedback (load cell/geophone based) + +> The use of multivariable and robust control on the full 6x6 hexapod does not improve performance over single-axis designs. + + +
+ Table 1: + Typical characteristics of sensors used for isolation in hexapod systems +
+ +| | **Load cell** | **Geophone** | +|-----------------------------------------|---------------------------------|-------------------------------------| +| Type | Relative | Inertial | +| Relationship with voice coil | Collocated and Dual | Non-Collocated and non-Dual | +| Open loop transfer function | (+) Alternating poles/zeros | (-) Large phase drop | +| Limitation from low-frequency zero pair | (-) Yes | (+) No | +| Sensitive to payload/base dynamics | (+) No | (-) Yes | +| Best frequency range | High (low-freq zero limitation) | Low (high-freq roll-off limitation) | + +**Ability of a sensor-actuator pair to improve performance**: +General system with input \\(u\\), performance \\(z\\), output \\(y\\), disturbance \\(w\\). + +Given a sensor \\(y\\) and actuator \\(u\\) and a controller \\(u = -K(s) y\\), the closed loop disturbance to performance transfer function can be written as: + +\\[ \left[ \frac{z}{w} \right]\_\text{CL} = \frac{G(s)\_{zw} + K(G(s)\_{zw} G(s)\_{yu} - G(s)\_{zu} G(s)\_{yw})}{1 + K G(s)\_{yu}} \\] + +In order to obtain a significant performance improvement, one has to use a high gain controller, _provided_ the term \\(G(s)\_{zw} + K(G(s)\_{zw} G(s)\_{yu} - G(s)\_{zu} G(s)\_{yw})\\) is small. + +We can compare the transfer function from \\(w\\) to \\(z\\) with and without a high gain controller. 
+And we find that for \\(u\\) and \\(y\\) to be an acceptable pair for high gain control: +\\[ \left| \frac{G(j\omega)\_{zw} G(j\omega)\_{yu} - G(j\omega)\_{zu} G(j\omega)\_{yw}}{K G(j\omega)\_{yu}} \right| \ll |G\_{zw}(j\omega)| \\] + +**Controllers**: + +**Force feedback**: + +- Performance limited by the low frequency zero-pair +- It is desirable that the zero-pair and the first mode are separated by at least a decade in frequency +- This can be achieved by reducing the cross-axis stiffness +- If the low frequency zero pair is inverted, robustness is lost +- Thus, the force feedback controller should be designed to have combined performance and robustness at frequencies at least a decade above the zero pair +- The presented controller has a high pass filter to reduce the gain below the zero-pair, a lag at low frequency to improve phase margin, and a low pass filter for roll off + +**Inertial feedback**: + +- Non-Collocated => multiple phase drops that limit the bandwidth of the controller +- Good performance, but the transmissibility "pops" due to low phase margin and thus this indicates robustness problems + +**Combined force/velocity feedback**: + +- Use the low frequency performance advantages of the geophone sensor with the high robustness advantages of the load cell sensor +- A Single-Input-Multiple-Outputs (SIMO) controller is found using LQG +- The performance requirements are met +- Good robustness + + + +{{< figure src="/ox-hugo/hauge04_obtained_transmissibility.png" caption="Figure 4: Experimental open loop (solid) and closed loop six-axis transmissibility using the geophone only controller (dotted), and combined geophone/load cell controller (dashed)" >}} + +# Bibliography +Hauge, G., & Campbell, M., *Sensors and control of a space-based six-axis vibration isolation system*, Journal of Sound and Vibration, *269(3-5)*, 913–931 (2004). 
http://dx.doi.org/10.1016/s0022-460x(03)00206-2 [↩](#f9698a1741fe7492aa9b7b42c7724670) diff --git a/content/paper/holler12_instr_x_ray_nano_imagin.md b/content/paper/holler12_instr_x_ray_nano_imagin.md new file mode 100644 index 0000000..5449f8b --- /dev/null +++ b/content/paper/holler12_instr_x_ray_nano_imagin.md @@ -0,0 +1,42 @@ ++++ +title = "An instrument for 3d x-ray nano-imaging" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Nano Active Stabilization System]({{< relref "nano_active_stabilization_system" >}}), [Positioning Stations]({{< relref "positioning_stations" >}}) + +Reference +: (Holler {\it et al.}, 2012) + +Author(s) +: Holler, M., Raabe, J., Diaz, A., Guizar-Sicairos, M., Quitmann, C., Menzel, A., & Bunk, O. + +Year +: 2012 + +Instrument similar to the NASS. +Obtain position stability of 10nm (standard deviation). + + + +{{< figure src="/ox-hugo/holler12_station.png" caption="Figure 1: Schematic of the tomography setup" >}} + +- **Limited resolution due to instrumentation**: + The resolution of ptychographic tomography remains above 100nm due to instabilities and drifts of the scanning systems. +- **Need of a Metrology System**: + + > To achieve positioning accuracy and stability in the nanometer range, one cannot rely on the position encoders built into individual positioning stages. + > A precise exteroceptive measurement of the relative position of the optical elements with respect to the sample is mandatory. + > Thus, thermal drifts and parasitic motions can be measured and compensated for. +- **Interferometer System Concept**: + The sample is aligned with the X-ray with the XYZ piezo stage. + As a result, the metrology sphere will be usually off center with respect to the rotation axis of the spindle. + That implies that the laser will not propagate back to the interferometer at all rotation angles. 
+ A position sensitive detector (PSD) is used, it provides a measurement of the position of the sphere in the plane perpendicular to the laser. + The interferometer is positionned on top of a translation stage. The PSD information is used to close the loop so that the interferometer follows the displacement of the metrology sphere. +- **Feedback Loop**: Using the signals from the 2 interferometers, the loop is closed to compensate low frequency vibrations and thermal drifts. + +# Bibliography +Holler, M., Raabe, J., Diaz, A., Guizar-Sicairos, M., Quitmann, C., Menzel, A., & Bunk, O., *An instrument for 3d x-ray nano-imaging*, Review of Scientific Instruments, *83(7)*, 073703 (2012). http://dx.doi.org/10.1063/1.4737624 [↩](#66ab0e7602a1dedda963d7da60533b0d) diff --git a/content/paper/holterman05_activ_dampin_based_decoup_colloc_contr.md b/content/paper/holterman05_activ_dampin_based_decoup_colloc_contr.md new file mode 100644 index 0000000..d171a53 --- /dev/null +++ b/content/paper/holterman05_activ_dampin_based_decoup_colloc_contr.md @@ -0,0 +1,20 @@ ++++ +title = "Active damping based on decoupled collocated control" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Active Damping]({{< relref "active_damping" >}}) + +Reference +: (Holterman \& deVries, 2005) + +Author(s) +: Holterman, J., & deVries, T. + +Year +: 2005 + +# Bibliography +Holterman, J., & deVries, T., *Active damping based on decoupled collocated control*, IEEE/ASME Transactions on Mechatronics, *10(2)*, 135–145 (2005). 
http://dx.doi.org/10.1109/tmech.2005.844702 [↩](#cc7836a555fe4dbae791e17008c29bfd) diff --git a/content/paper/ito16_compar_class_high_precis_actuat.md b/content/paper/ito16_compar_class_high_precis_actuat.md new file mode 100644 index 0000000..f5f1160 --- /dev/null +++ b/content/paper/ito16_compar_class_high_precis_actuat.md @@ -0,0 +1,71 @@ ++++ +title = "Comparison and classification of high-precision actuators based on stiffness influencing vibration isolation" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Vibration Isolation]({{< relref "vibration_isolation" >}}), [Actuators]({{< relref "actuators" >}}) + +Reference +: (Shingo Ito \& Georg Schitter, 2016) + +Author(s) +: Ito, S., & Schitter, G. + +Year +: 2016 + + +## Classification of high-precision actuators {#classification-of-high-precision-actuators} + +
+ Table 1: + Zero/Low and High stiffness actuators +
+ +| **Categories** | **Pros** | **Cons** | +|----------------|---------------------------|-----------------------------| +| Zero stiffness | No vibration transmission | Large and Heavy | +| Low stiffness | High vibration isolation | Typically for low load | +| High Stiffness | High control bandwidth | High vibration transmission | + + +## Time Delay of Piezoelectric Electronics {#time-delay-of-piezoelectric-electronics} + +In this paper, the piezoelectric actuator/electronics adds a time delay which is much higher than the time delay added by the voice coil/electronics. + + +## Definition of low-stiffness and high-stiffness actuator {#definition-of-low-stiffness-and-high-stiffness-actuator} + +- **Low Stiffness** actuator is defined as the ones where the transmissibility stays below 0dB at all frequency +- **High Stiffness** actuator is defined as the ones where the transmissibility goes above 0dB at some frequency + +{{< figure src="/ox-hugo/ito16_low_high_stiffness_actuators.png" caption="Figure 1: Definition of low-stiffness and high-stiffness actuator" >}} + + +## Low-Stiffness / High-Stiffness characteristics {#low-stiffness-high-stiffness-characteristics} + +- The low stiffness actuators achieve smooth transition from active isolation to passive isolation. +- The high stiffness actuators can have a gap between the passive and active isolation vibration where the vibrations are amplified in a certain frequency band. + + +## Controller Design {#controller-design} + +{{< figure src="/ox-hugo/ito16_transmissibility.png" caption="Figure 2: Obtained transmissibility" >}} + + +## Discussion {#discussion} + +The stiffness requirement for low-stiffness actuators can be rephrased in the frequency domain as: "the cross-over frequency of the sensitivity function of the feedback system must be larger than \\(\sqrt{2} \omega\_r\\) with \\(\omega\_r\\) is the resonant frequency of the uncontrolled system". 
+ +In practice, this is difficult to achieve with piezoelectric actuators as their first resonant frequency \\(\omega\_r\\) is **too close to other resonant frequencies to ensure closed-loop stability**. +In contrast, the frequency band between the first and the other resonances of Lorentz actuators can be made broad by design, making them more suitable to construct low-stiffness actuators. + +# Bibliography +Ito, S., & Schitter, G., *Comparison and classification of high-precision actuators based on stiffness influencing vibration isolation*, IEEE/ASME Transactions on Mechatronics, *21(2)*, 1169–1178 (2016). http://dx.doi.org/10.1109/tmech.2015.2478658 [↩](#aad53368e29e8a519e2f63857044fa46) + + +## Backlinks {#backlinks} + +- [Actuators]({{< relref "actuators" >}}) diff --git a/content/paper/jiao18_dynam_model_exper_analy_stewar.md b/content/paper/jiao18_dynam_model_exper_analy_stewar.md new file mode 100644 index 0000000..0c618fe --- /dev/null +++ b/content/paper/jiao18_dynam_model_exper_analy_stewar.md @@ -0,0 +1,20 @@ ++++ +title = "Dynamic modeling and experimental analyses of stewart platform with flexible hinges" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Flexible Joints]({{< relref "flexible_joints" >}}) + +Reference +: (Jian Jiao {\it et al.}, 2018) + +Author(s) +: Jiao, J., Wu, Y., Yu, K., & Zhao, R. + +Year +: 2018 + +# Bibliography +Jiao, J., Wu, Y., Yu, K., & Zhao, R., *Dynamic modeling and experimental analyses of stewart platform with flexible hinges*, Journal of Vibration and Control, *25(1)*, 151–171 (2018). 
http://dx.doi.org/10.1177/1077546318772474 [↩](#ee917739f88877d6c2758c1c36565deb) diff --git a/content/paper/legnani12_new_isotr_decoup_paral_manip.md b/content/paper/legnani12_new_isotr_decoup_paral_manip.md new file mode 100644 index 0000000..f722f5b --- /dev/null +++ b/content/paper/legnani12_new_isotr_decoup_paral_manip.md @@ -0,0 +1,34 @@ ++++ +title = "A new isotropic and decoupled 6-dof parallel manipulator" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}) + +Reference +: (Legnani {\it et al.}, 2012) + +Author(s) +: Legnani, G., Fassi, I., Giberti, H., Cinquemani, S., & Tosi, D. + +Year +: 2012 + +- Concepts of isotropy and decoupling for parallel manipulators +- **isotropy**: the kinetostatic properties (same applicable force, same possible velocity, same stiffness) are identical in all directions (e.g. cubic configuration for Stewart platform) +- **decoupling**: each DoF of the end effector can be controlled by a **single** actuator (not the case for the Stewart platform) + +Example of generated isotropic manipulator (not decoupled). + + + +{{< figure src="/ox-hugo/legnani12_isotropy_gen.png" caption="Figure 1: Location of the leg axes using an isotropy generator" >}} + + + +{{< figure src="/ox-hugo/legnani12_generated_isotropy.png" caption="Figure 2: Isotropic configuration" >}} + +# Bibliography +Legnani, G., Fassi, I., Giberti, H., Cinquemani, S., & Tosi, D., *A new isotropic and decoupled 6-dof parallel manipulator*, Mechanism and Machine Theory, *58(nil)*, 64–81 (2012). 
http://dx.doi.org/10.1016/j.mechmachtheory.2012.07.008 [↩](#17295cbc2858c65ecc60d51b450233e3) diff --git a/content/paper/li01_simul_fault_vibrat_isolat_point.md b/content/paper/li01_simul_fault_vibrat_isolat_point.md new file mode 100644 index 0000000..c847f09 --- /dev/null +++ b/content/paper/li01_simul_fault_vibrat_isolat_point.md @@ -0,0 +1,233 @@ ++++ +title = "Simultaneous, fault-tolerant vibration isolation and pointing control of flexure jointed hexapods" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Vibration Isolation]({{< relref "vibration_isolation" >}}), [Cubic Architecture]({{< relref "cubic_architecture" >}}), [Flexible Joints]({{< relref "flexible_joints" >}}), [Multivariable Control]({{< relref "multivariable_control" >}}) + +Reference +: @phdthesis{li01_simul_fault_vibrat_isolat_point, + author = {Li, Xiaochun}, + school = {University of Wyoming}, + title = {Simultaneous, Fault-tolerant Vibration Isolation and + Pointing Control of Flexure Jointed Hexapods}, + year = 2001, + tags = {parallel robot}, + } + +Author(s) +: Li, X. + +Year +: 2001 + + +## Introduction {#introduction} + +**Stewart Platform**: + +- Cubic (mutually orthogonal) +- Flexure Joints => eliminate friction and backlash but add complexity to the dynamics + + + +{{< figure src="/ox-hugo/li01_stewart_platform.png" caption="Figure 1: Flexure jointed Stewart platform used for analysis and control" >}} + +**Goal**: + +- Precise pointing in two axes (sub micro-radians) +- simultaneously, providing both passive and active vibration isolation in six axes + +**Jacobian Analysis**: +\\[ \delta \mathcal{L} = J \delta \mathcal{X} \\] +The origin of \\(\\{P\\}\\) is taken as the center of mass of the payload. + +**Decoupling**: +If we refine the (force) inputs and (displacement) outputs as shown in Figure [2](#org2d875d1) or in Figure [3](#org3e247bd), we obtain a decoupled plant provided that: + +1. 
the payload mass/inertia matrix must be diagonal (the CoM is coincident with the origin of frame \\(\\{P\\}\\)) +2. the geometry of the hexapod and the attachment of the payload to the hexapod must be carefully chosen + +> For instance, if the hexapod has a mutually orthogonal geometry (cubic configuration), the payload's center of mass must coincide with the center of the cube formed by the orthogonal struts. + + + +{{< figure src="/ox-hugo/li01_decoupling_conf.png" caption="Figure 2: Decoupling the dynamics of the Stewart Platform using the Jacobians" >}} + + + +{{< figure src="/ox-hugo/li01_decoupling_conf_bis.png" caption="Figure 3: Decoupling the dynamics of the Stewart Platform using the Jacobians" >}} + + +## Simultaneous Vibration Isolation and Pointing Control {#simultaneous-vibration-isolation-and-pointing-control} + +Basic idea: + +- acceleration feedback is used to provide high-frequency vibration isolation +- cartesian pointing feedback can be used to provide low-frequency pointing + +The compensation is divided in frequency because: + +- pointing sensors often have low bandwidth +- acceleration sensors often have a poor low frequency response + +The control bandwidth is divided as follows: + +- low-frequency disturbances as attenuated and tracking is accomplished by feedback from low bandwidth pointing sensors +- mid-frequency disturbances are attenuated by feedback from band-pass sensors like accelerometer or load cells +- high-frequency disturbances are attenuated by passive isolation techniques + + +### Vibration Isolation {#vibration-isolation} + +The system is decoupled into six independent SISO subsystems using the architecture shown in Figure [4](#org3c42849). 
+ + + +{{< figure src="/ox-hugo/li01_vibration_isolation_control.png" caption="Figure 4: Figure caption" >}} + +One of the subsystem plant transfer function is shown in Figure [4](#org3c42849) + + + +{{< figure src="/ox-hugo/li01_vibration_control_plant.png" caption="Figure 5: Plant transfer function of one of the SISO subsystem for Vibration Control" >}} + +Each compensator is designed using simple loop-shaping techniques. + +The unity control bandwidth of the isolation loop is designed to be from **5Hz to 50Hz**. + +> Despite a reasonably good match between the modeled and the measured transfer functions, the model based decoupling algorithm does not produce the expected decoupling. +> Only about 20 dB separation is achieve between the diagonal and off-diagonal responses. + + +### Pointing Control {#pointing-control} + +A block diagram of the pointing control system is shown in Figure [6](#org3c3e6ad). + + + +{{< figure src="/ox-hugo/li01_pointing_control.png" caption="Figure 6: Figure caption" >}} + +The plant is decoupled into two independent SISO subsystems. +The compensators are design with inverse-dynamics methods. + +The unity control bandwidth of the pointing loop is designed to be from **0Hz to 20Hz**. + +A feedforward control is added as shown in Figure [7](#orgc8fa614). + + + +{{< figure src="/ox-hugo/li01_feedforward_control.png" caption="Figure 7: Feedforward control" >}} + + +### Simultaneous Control {#simultaneous-control} + +The simultaneous vibration isolation and pointing control is approached in two ways: + +1. design and implement the vibration isolation control first, identify the pointing plant when the isolation loops are closed, then implement the pointing compensators +2. the reverse design order + +Figure [8](#org987b709) shows a parallel control structure where \\(G\_1(s)\\) is the dynamics from input force to output strut length. 
+ + + +{{< figure src="/ox-hugo/li01_parallel_control.png" caption="Figure 8: A parallel scheme" >}} + +The transfer function matrix for the pointing loop after the vibration isolation is closed is still decoupled. The same happens when closing the pointing loop first and looking at the transfer function matrix of the vibration isolation. + +The effect of the isolation loop on the pointing loop is large around the natural frequency of the plant as shown in Figure [9](#orgb070c43). + + + +{{< figure src="/ox-hugo/li01_effect_isolation_loop_closed.png" caption="Figure 9: \\(\theta\_x/\theta\_{x\_d}\\) transfer function with the isolation loop closed (simulation)" >}} + +The effect of pointing control on the isolation plant has not much effect. + +> The interaction between loops may affect the transfer functions of the **first** closed loop, and thus affect its relative stability. + +The dynamic interaction effect: + +- only happens in the unity bandwidth of the loop transmission of the first closed loop. +- affect the closed loop transmission of the loop first closed (see Figures [10](#org0d64bc7) and [11](#orgb43f022)) + +As shown in Figure [10](#org0d64bc7), the peak resonance of the pointing loop increase after the isolation loop is closed. +The resonances happen at both crossovers of the isolation loop (15Hz and 50Hz) and they may show of loss of robustness. + + + +{{< figure src="/ox-hugo/li01_closed_loop_pointing.png" caption="Figure 10: Closed-loop transfer functions \\(\theta\_y/\theta\_{y\_d}\\) of the pointing loop before and after the vibration isolation loop is closed" >}} + +The same happens when first closing the vibration isolation loop and after the pointing loop (Figure [11](#orgb43f022)). +The first peak resonance of the vibration isolation loop at 15Hz is increased when closing the pointing loop. 
+ + + +{{< figure src="/ox-hugo/li01_closed_loop_vibration.png" caption="Figure 11: Closed-loop transfer functions of the vibration isolation loop before and after the pointing control loop is closed" >}} + +> The isolation loop adds a second resonance peak at its high-frequency crossover in the pointing closed-loop transfer function, which may cause instability. +> Thus, it is recommended to design and implement the isolation control system first, and then identify the pointing plant with the isolation loop closed. + + +### Experimental results {#experimental-results} + +Two hexapods are stacked (Figure [12](#org12b1e53)): + +- the bottom hexapod is used to generate disturbances matching candidate applications +- the top hexapod provide simultaneous vibration isolation and pointing control + + + +{{< figure src="/ox-hugo/li01_test_bench.png" caption="Figure 12: Stacked Hexapods" >}} + +Using the vibration isolation control alone, no attenuation is achieved below 1Hz as shown in figure [13](#org4b99c02). + + + +{{< figure src="/ox-hugo/li01_vibration_isolation_control_results.png" caption="Figure 13: Vibration isolation control: open-loop (solid) vs. closed-loop (dashed)" >}} + +The simultaneous control is of dual use: + +- it provide simultaneous pointing and isolation control +- it can also be used to expand the bandwidth of the isolation control to low frequencies because the pointing loops suppress pointing errors due to both base vibrations and tracking + +The results of simultaneous control is shown in Figure [14](#orged11c63) where the bandwidth of the isolation control is expanded to very low frequency. + + + +{{< figure src="/ox-hugo/li01_simultaneous_control_results.png" caption="Figure 14: Simultaneous control: open-loop (solid) vs. 
closed-loop (dashed)" >}} + + +## Future research areas {#future-research-areas} + +Proposed future research areas include: + +- **Include base dynamics in the control**: + The base dynamics is here neglected since the movements of the base are very small. + The base dynamics could be measured by mounting accelerometers at the bottom of each strut or by using force sensors. + It then could be included in the feedforward path. +- **Robust control and MIMO design** +- **New decoupling method**: + The proposed decoupling algorithm do not produce the expected decoupling, despite a reasonably good match between the modeled and the measured transfer functions. + Incomplete decoupling increases the difficulty in designing the controller. + New decoupling methods are needed. + These methods must be static in order to be implemented practically on precision hexapods +- **Identification**: + Many advanced control methods require a more accurate model or identified plant. + A closed-loop identification method is propose to solve some problems with the current identification methods used. +- **Other possible sensors**: + Many sensors can be used to expand the utility of the Stewart platform: + - **3-axis load cells** to investigate the Coriolis and centripetal terms and new decoupling methods + - **LVDT** to provide differential position of the hexapod payload with respect to the base + - **Geophones** to provide payload and base velocity information + +# Bibliography +Li, X., *Simultaneous, fault-tolerant vibration isolation and pointing control of flexure jointed hexapods* (Doctoral dissertation) (2001). University of Wyoming, . 
[↩](#f885df380638b868e509fbbf75912d1e) diff --git a/content/paper/li01_simul_vibrat_isolat_point_contr.md b/content/paper/li01_simul_vibrat_isolat_point_contr.md new file mode 100644 index 0000000..dc73154 --- /dev/null +++ b/content/paper/li01_simul_vibrat_isolat_point_contr.md @@ -0,0 +1,23 @@ ++++ +title = "Simultaneous vibration isolation and pointing control of flexure jointed hexapods" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Vibration Isolation]({{< relref "vibration_isolation" >}}) + +Reference +: (Xiaochun Li {\it et al.}, 2001) + +Author(s) +: Li, X., Hamann, J. C., & McInroy, J. E. + +Year +: 2001 + +- if the hexapod is designed such that the payload mass/inertia matrix (\\(M\_x\\)) and \\(J^T J\\) are diagonal, the dynamics from \\(u\\) to \\(y\\) are decoupled. + +# Bibliography +Li, X., Hamann, J. C., & McInroy, J. E., *Simultaneous vibration isolation and pointing control of flexure jointed hexapods*, In , Smart Structures and Materials 2001: Smart Structures and Integrated Systems (pp. ) (2001). : . [↩](#e3df2691f750617c3995644d056d553a) diff --git a/content/paper/oomen18_advan_motion_contr_precis_mechat.md b/content/paper/oomen18_advan_motion_contr_precis_mechat.md new file mode 100644 index 0000000..dbe51f7 --- /dev/null +++ b/content/paper/oomen18_advan_motion_contr_precis_mechat.md @@ -0,0 +1,20 @@ ++++ +title = "Advanced motion control for precision mechatronics: control, identification, and learning of complex systems" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Motion Control]({{< relref "motion_control" >}}) + +Reference +: (Tom Oomen, 2018) + +Author(s) +: Oomen, T. + +Year +: 2018 + +# Bibliography +Oomen, T., *Advanced motion control for precision mechatronics: control, identification, and learning of complex systems*, IEEJ Journal of Industry Applications, *7(2)*, 127–140 (2018). 
http://dx.doi.org/10.1541/ieejjia.7.127 [↩](#73fd325bd20a6ef8972145e535f38198) diff --git a/content/paper/poel10_explor_activ_hard_mount_vibrat.md b/content/paper/poel10_explor_activ_hard_mount_vibrat.md new file mode 100644 index 0000000..d20a786 --- /dev/null +++ b/content/paper/poel10_explor_activ_hard_mount_vibrat.md @@ -0,0 +1,42 @@ ++++ +title = "An exploration of active hard mount vibration isolation for precision equipment" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Vibration Isolation]({{< relref "vibration_isolation" >}}) + +Reference +: @phdthesis{poel10_explor_activ_hard_mount_vibrat, + author = {van der Poel, Gerrit Wijnand}, + doi = {10.3990/1.9789036530163}, + isbn = {978-90-365-3016-3}, + school = {University of Twente}, + title = {An Exploration of Active Hard Mount Vibration Isolation for + Precision Equipment}, + url = {https://doi.org/10.3990/1.9789036530163}, + year = 2010, + year = 2010, + tags = {parallel robot}, + } + +Author(s) +: van der Poel, G. W. + +Year +: 2010 + +# Bibliography +van der Poel, G. W., *An exploration of active hard mount vibration isolation for precision equipment* (Doctoral dissertation) (2010). University of Twente, . [↩](#bcab548922e0e1ad6a2c310f63879596) diff --git a/content/paper/preumont02_force_feedb_versus_accel_feedb.md b/content/paper/preumont02_force_feedb_versus_accel_feedb.md new file mode 100644 index 0000000..6427946 --- /dev/null +++ b/content/paper/preumont02_force_feedb_versus_accel_feedb.md @@ -0,0 +1,49 @@ ++++ +title = "Force feedback versus acceleration feedback in active vibration isolation" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Vibration Isolation]({{< relref "vibration_isolation" >}}) + +Reference +: (Preumont {\it et al.}, 2002) + +Author(s) +: Preumont, A., A. Francois, Bossens, F., & Abu-Hanieh, A. 
+ +Year +: 2002 + +Summary: + +- Compares the force feedback and acceleration feedback for active damping +- The use of a force sensor always gives alternating poles and zeros in the open-loop transfer function between the force actuator and the force sensor which **guarantees the stability of the closed loop** +- Acceleration feedback produces alternating poles and zeros only when the flexible structure is stiff compared to the isolation system + +The force applied to a **rigid body** is proportional to its acceleration, thus sensing the total interface force gives a measure of the absolute acceleration of the solid body. +Thus force feedback and acceleration feedback are equivalent for solid bodies. +When there is a flexible payload, the two sensing options are no longer equivalent. + +- For light payload (Figure [1](#org7b4f6ee)), the acceleration feedback gives larger damping on the higher mode. +- For heavy payload (Figure [2](#org361b58f)), the acceleration feedback does not give alternating poles and zeros and thus for high control gains, the system becomes unstable + +<a id="org7b4f6ee"></a> + +{{< figure src="/ox-hugo/preumont02_force_acc_fb_light.png" caption="Figure 1: Root locus for **light** flexible payload, (a) Force feedback, (b) acceleration feedback" >}} + +<a id="org361b58f"></a> + +{{< figure src="/ox-hugo/preumont02_force_acc_fb_heavy.png" caption="Figure 2: Root locus for **heavy** flexible payload, (a) Force feedback, (b) acceleration feedback" >}} + +Guaranteed stability of the force feedback: + +> If two arbitrary flexible, undamped structures are connected with a single-axis soft isolator with force feedback, the poles and zeros of the open-loop transfer function from the force actuator to the force sensor alternate on the imaginary axis. + +The same is true for the transfer function from the force actuator to the relative displacement of the actuator. 
+ +> According to physical interpretation of the zeros, they represent the resonances of the subsystem constrained by the sensor and the actuator. + +# Bibliography +Preumont, A., A. Fran\ccois, Bossens, F., & Abu-Hanieh, A., *Force feedback versus acceleration feedback in active vibration isolation*, Journal of Sound and Vibration, *257(4)*, 605–613 (2002). http://dx.doi.org/10.1006/jsvi.2002.5047 [↩](#525e1e237b885f81fae3c25a3036ba6f) diff --git a/content/paper/preumont07_six_axis_singl_stage_activ.md b/content/paper/preumont07_six_axis_singl_stage_activ.md new file mode 100644 index 0000000..d0894e4 --- /dev/null +++ b/content/paper/preumont07_six_axis_singl_stage_activ.md @@ -0,0 +1,49 @@ ++++ +title = "A six-axis single-stage active vibration isolator based on stewart platform" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Vibration Isolation]({{< relref "vibration_isolation" >}}), [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Flexible Joints]({{< relref "flexible_joints" >}}) + +Reference +: (Preumont {\it et al.}, 2007) + +Author(s) +: Preumont, A., Horodinca, M., Romanescu, I., Marneffe, B. d., Avraam, M., Deraemaeker, A., Bossens, F., … + +Year +: 2007 + +Summary: + +- **Cubic** Stewart platform (Figure [3](#org32a4f7c)) + - Provides uniform control capability + - Uniform stiffness in all directions + - minimizes the cross-coupling among actuators and sensors of different legs +- Flexible joints (Figure [2](#orgf807976)) +- Piezoelectric force sensors +- Voice coil actuators +- Decentralized feedback control approach for vibration isolation +- Effect of parasitic stiffness of the flexible joints on the IFF performance (Figure [1](#org744bdc9)) +- The Stewart platform has 6 suspension modes at different frequencies. + Thus the gain of the IFF controller cannot be optimal for all the modes. + It is better if all the modes of the platform are near to each other. 
+- Discusses the design of the legs in order to maximize the natural frequency of the local modes. +- To estimate the isolation performance of the Stewart platform, a scalar indicator is defined as the Frobenius norm of the transmissibility matrix + + + +{{< figure src="/ox-hugo/preumont07_iff_effect_stiffness.png" caption="Figure 1: Root locus with IFF with no parasitic stiffness and with parasitic stiffness" >}} + + + +{{< figure src="/ox-hugo/preumont07_flexible_joints.png" caption="Figure 2: Flexible joints used for the Stewart platform" >}} + + + +{{< figure src="/ox-hugo/preumont07_stewart_platform.png" caption="Figure 3: Stewart platform" >}} + +# Bibliography +Preumont, A., Horodinca, M., Romanescu, I., Marneffe, B. d., Avraam, M., Deraemaeker, A., Bossens, F., …, *A six-axis single-stage active vibration isolator based on stewart platform*, Journal of Sound and Vibration, *300(3-5)*, 644–661 (2007). http://dx.doi.org/10.1016/j.jsv.2006.07.050 [↩](#8096d5b2df73551d836ef96b7ca7efa4) diff --git a/content/paper/saxena12_advan_inter_model_contr_techn.md b/content/paper/saxena12_advan_inter_model_contr_techn.md new file mode 100644 index 0000000..c7e45ac --- /dev/null +++ b/content/paper/saxena12_advan_inter_model_contr_techn.md @@ -0,0 +1,88 @@ ++++ +title = "Advances in internal model control technique: a review and future prospects" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Complementary Filters]({{< relref "complementary_filters" >}}) + +Reference +: (Sahaj Saxena \& YogeshV Hote, 2012) + +Author(s) +: Saxena, S., & Hote, Y. 
+ +Year +: 2012 + + +## Proposed Filter \\(F(s)\\) {#proposed-filter--fs} + +\begin{align\*} + F(s) &= \frac{1}{(\lambda s + 1)^n} \\\\\\ + F(s) &= \frac{n \lambda s + 1}{(\lambda s + 1)^n} +\end{align\*} + + +## Internal Model Control {#internal-model-control} + +Central concept in IMC: control can be achieved only if the control system involves, either implicitly or explicitly, some representation of the process to be controlled. + + +### Basic IMC structure {#basic-imc-structure} + +IMC can be considered as a special case of classical feedback structure with plant \\(G(s)\\) and controller \\(C(s)\\). + +The plant model \\(G\_M(s)\\) is added and subtracted into the feedback path of feedback controller. + +The structure can then be modified and we obtain a new controller \\(Q(s)\\). + +IMC is related to the classical controller through: + +\begin{align\*} +Q(s) = \frac{C(s)}{1+G\_M(s)C(s)} \\\\\\ +C(s) = \frac{Q(s)}{1-G\_M(s)Q(s)} +\end{align\*} + +Internal model control system is characterized by a control device consisting of the controller \\(Q(s)\\) and a predictive model \\(G\_M(s)\\) of the process (internal model). +The internal model loop uses the difference between the outputs of the process \\(G(s)\\) to be controlled and the internal model. +This difference \\(E(s)\\) represents the effect of disturbance and mismatch of the model. + + +### Features of IMC Structure {#features-of-imc-structure} + +Three properties: + +- **Dual stability**: assume that, if the plant model is perfect (\\(G\_M(s) = G(s)\\)) and disturbance is absent, the system becomes open-loop and the closed-loop stability is characterized by the stability of \\(G(s)\\) and \\(Q(s)\\) +- **Perfect control**: assume that, if the controller is equal to the model inverse (\\(Q(s) = G\_M^{-1}\\)) and \\(G(s) = G\_M(s)\\) with \\(G(s)\\) stable, then the system is perfectly controlled. 
+- **Zero Offset**: assume that, if the steady state gain of the controller is equal to the inverse of model gain, then offset free control is obtained for constant step or ramp type inputs and disturbances. As expected, the equivalent classical controller leads to integral action. + +Issues: + +- the plant model is never perfect +- inverting the model can cause instability +- control signal may have large magnitude + + +## Design procedure for IMC Compensator {#design-procedure-for-imc-compensator} + +1. factorize the plant model as \\(G\_M(s) = G\_{M-}(s)G\_{M+}(s)\\) where \\(G\_{M-}(s)\\) is invertible and minimum phase and \\(G\_{M+}(s)\\) is non-invertible and contains all non-minimum phase elements (delays, RHP zeros). Then, the controller is the inverse of the invertible portion of the plant model: \\(Q\_1(s) = G\_{M-}^{-1}(s)\\). +2. Filter selection: to make the controller proper and robust against the plant-model mismatch, a low pass filter of the form \\(F(s) = \frac{n \lambda s + 1}{(\lambda s + 1)^n}\\) is augmented with the inverted model \\(Q\_1(s)\\): \\(Q(s) = Q\_1(s) F(s)\\). \\(\lambda\\) is a tuning parameter which has an inverse relationship with the speed of closed loop response, \\(n\\) is selected such that \\(Q(s)\\) becomes proper. + + +## Issues in IMC {#issues-in-imc} + + +### Filter selection and tuning guidelines {#filter-selection-and-tuning-guidelines} + + +## Some advantages and future prospects {#some-advantages-and-future-prospects} + + +## Conclusion {#conclusion} + +The interesting feature regarding IMC is that the design scheme is identical to the open-loop control design procedure and the implementation of IMC results in a feedback system, thereby coping with the disturbances and parameter uncertainties, while open-loop control does not. + +# Bibliography +Saxena, S., & Hote, Y., *Advances in internal model control technique: a review and future prospects*, IETE Technical Review, *29(6)*, 461 (2012). 
http://dx.doi.org/10.4103/0256-4602.105001 [↩](#14f767d8ba71d58fa8a3ec876628d750) diff --git a/content/paper/schellekens98_desig_precis.md b/content/paper/schellekens98_desig_precis.md new file mode 100644 index 0000000..37091c9 --- /dev/null +++ b/content/paper/schellekens98_desig_precis.md @@ -0,0 +1,20 @@ ++++ +title = "Design for precision: current status and trends" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Precision Engineering]({{< relref "precision_engineering" >}}) + +Reference +: (Schellekens {\it et al.}, 1998) + +Author(s) +: Schellekens, P., Rosielle, N., Vermeulen, H., Vermeulen, M., Wetzels, S., & Pril, W. + +Year +: 1998 + +# Bibliography +Schellekens, P., Rosielle, N., Vermeulen, H., Vermeulen, M., Wetzels, S., & Pril, W., *Design for precision: current status and trends*, Cirp Annals, *(2)*, 557–586 (1998). http://dx.doi.org/10.1016/s0007-8506(07)63243-0 [↩](#89f7d8f4c31f79f83e3666017687f525) diff --git a/content/paper/sebastian12_nanop_with_multip_sensor.md b/content/paper/sebastian12_nanop_with_multip_sensor.md new file mode 100644 index 0000000..42e2c6f --- /dev/null +++ b/content/paper/sebastian12_nanop_with_multip_sensor.md @@ -0,0 +1,20 @@ ++++ +title = "Nanopositioning with multiple sensors: a case study in data storage" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Sensor Fusion]({{< relref "sensor_fusion" >}}) + +Reference +: (Abu Sebastian \& Angeliki Pantazi, 2012) + +Author(s) +: Sebastian, A., & Pantazi, A. + +Year +: 2012 + +# Bibliography +Sebastian, A., & Pantazi, A., *Nanopositioning with multiple sensors: a case study in data storage*, IEEE Transactions on Control Systems Technology, *20(2)*, 382–394 (2012). 
http://dx.doi.org/10.1109/tcst.2011.2177982 [↩](#eb5a15a8c900d93de0b9bab520e1b6da) diff --git a/content/paper/spanos95_soft_activ_vibrat_isolat.md b/content/paper/spanos95_soft_activ_vibrat_isolat.md new file mode 100644 index 0000000..9f531f9 --- /dev/null +++ b/content/paper/spanos95_soft_activ_vibrat_isolat.md @@ -0,0 +1,62 @@ ++++ +title = "A soft 6-axis active vibration isolator" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Vibration Isolation]({{< relref "vibration_isolation" >}}) + +Reference +: (Spanos {\it et al.}, 1995) + +Author(s) +: Spanos, J., Rahman, Z., & Blackwood, G. + +Year +: 1995 + +**Stewart Platform** (Figure [1](#org4317d08)): + +- Voice Coil +- Flexible joints (cross-blades) +- Force Sensors +- Cubic Configuration + + + +{{< figure src="/ox-hugo/spanos95_stewart_platform.png" caption="Figure 1: Stewart Platform" >}} + +Total mass of the payload: 30kg +Center of gravity is 9cm above the geometry center of the mount (cube's center?). + +Limitation of the **Decentralized Force Feedback**: + +- high frequency pole due to internal resonances of the struts +- low frequency zero due to the rotational stiffness of the flexible joints + +After redesign of the struts: + +- high frequency pole at 4.7kHz +- low frequency zero at 2.6Hz but non-minimum phase (not explained). + Small viscous damping material in the cross blade flexures made the zero minimum phase again. 
+ + + +{{< figure src="/ox-hugo/spanos95_iff_plant.png" caption="Figure 2: Experimentally measured transfer function from voice coil drive voltage to collocated load cell output voltage" >}} + +The controller used consisted of: + +- second order low pass filter to gain stabilize the plant at high frequencies and provide steep roll-off +- first order lead filter to provide adequate phase margin at the high frequency crossover +- first order lag filter to provide adequate phase margin at the low frequency crossover +- a first order high pass filter to attenuate the excess gain resulting from the low frequency zero + +The results in terms of transmissibility are shown in Figure [3](#orgf128817). + + + +{{< figure src="/ox-hugo/spanos95_results.png" caption="Figure 3: Experimentally measured Frobenius norm of the 6-axis transmissibility" >}} + +# Bibliography +Spanos, J., Rahman, Z., & Blackwood, G., *A soft 6-axis active vibration isolator*, In , Proceedings of 1995 American Control Conference - ACC'95 (pp. ) (1995). : . 
[↩](#a48f6708d087625a42ca2375407a2bc4) diff --git a/content/paper/stankevic17_inter_charac_rotat_stages_x_ray_nanot.md b/content/paper/stankevic17_inter_charac_rotat_stages_x_ray_nanot.md new file mode 100644 index 0000000..e419720 --- /dev/null +++ b/content/paper/stankevic17_inter_charac_rotat_stages_x_ray_nanot.md @@ -0,0 +1,33 @@ ++++ +title = "Interferometric characterization of rotation stages for x-ray nanotomography" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Nano Active Stabilization System]({{< relref "nano_active_stabilization_system" >}}), [Positioning Stations]({{< relref "positioning_stations" >}}) + +Reference +: (Tomas Stankevic {\it et al.}, 2017) + +Author(s) +: Stankevic, T., Engblom, C., Langlois, F., Alves, F., Lestrade, A., Jobert, N., Cauchon, G., … + +Year +: 2017 + +- Similar Station to the NASS +- Similar Metrology with fiber based interferometers and cylindrical reference mirror + + + +{{< figure src="/ox-hugo/stankevic17_station.png" caption="Figure 1: Positioning Station" >}} + +- **Thermal expansion**: Stabilized down to \\(5mK/h\\) using passive water flow through the baseplate below the sample stage and in the interferometry reference frame. +- **Controller**: Two Independent PID loops +- Repeatable errors => feedforward (Look Up Table) +- Non-repeatable errors => feedback +- Result: 40nm runout error + +# Bibliography +Stankevic, T., Engblom, C., Langlois, F., Alves, F., Lestrade, A., Jobert, N., Cauchon, G., …, *Interferometric characterization of rotation stages for x-ray nanotomography*, Review of Scientific Instruments, *88(5)*, 053703 (2017). 
http://dx.doi.org/10.1063/1.4983405 [↩](#abb1be5f48179255f7d8c45b1784bcf8) diff --git a/content/paper/tang18_decen_vibrat_contr_voice_coil.md b/content/paper/tang18_decen_vibrat_contr_voice_coil.md new file mode 100644 index 0000000..7086d0b --- /dev/null +++ b/content/paper/tang18_decen_vibrat_contr_voice_coil.md @@ -0,0 +1,21 @@ ++++ +title = "Decentralized vibration control of a voice coil motor-based stewart parallel mechanism: simulation and experiments" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}) + +Reference +: (Jie Tang {\it et al.}, 2018) + +Author(s) +: Tang, J., Cao, D., & Yu, T. + +Year +: 2018 + +# Bibliography +Tang, J., Cao, D., & Yu, T., *Decentralized vibration control of a voice coil motor-based stewart parallel mechanism: simulation and experiments*, Proceedings of the Institution of Mechanical Engineers, Part C: Journal of Mechanical Engineering Science, *233(1)*, 132–145 (2018). http://dx.doi.org/10.1177/0954406218756941 [↩](#85f81ff678aabc195636437548e4234a) diff --git a/content/paper/tjepkema12_sensor_fusion_activ_vibrat_isolat_precis_equip.md b/content/paper/tjepkema12_sensor_fusion_activ_vibrat_isolat_precis_equip.md new file mode 100644 index 0000000..54ed4ad --- /dev/null +++ b/content/paper/tjepkema12_sensor_fusion_activ_vibrat_isolat_precis_equip.md @@ -0,0 +1,50 @@ ++++ +title = "Sensor fusion for active vibration isolation in precision equipment" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Sensor Fusion]({{< relref "sensor_fusion" >}}), [Vibration Isolation]({{< relref "vibration_isolation" >}}) + +Reference +: (Tjepkema {\it et al.}, 2012) + +Author(s) +: Tjepkema, D., Dijk, J. v., & Soemers, H. 
+ +Year +: 2012 + + +## Relative motion Control {#relative-motion-control} + +Control law: \\(f = -G(x-w)\\) + +\\[ \frac{x}{w} = \frac{k+G}{ms^2 + k+G} \\] +\\[ \frac{x}{F} = \frac{1}{ms^2 + k+G} \\] + + +## Force Control {#force-control} + +Control law: \\(f = -G F\_a = -G \left(f-k(x-w)\right)\\) + +\\[ \frac{x}{w} = \frac{k}{(1+G)ms^2 + k} \\] +\\[ \frac{x}{F} = \frac{1+G}{(1+G)ms^2 + k} \\] + + +## Inertial Control {#inertial-control} + +Control law: \\(f = -Gx\\) + +\\[ \frac{x}{w} = \frac{k}{ms^2 + k+G} \\] +\\[ \frac{x}{F} = \frac{1}{ms^2 + k+G} \\] + + +## Design constraints and control bandwidth {#design-constraints-and-control-bandwidth} + +Heavier sensor => lower noise but it is harder to maintain collocation with the actuator => that limits the bandwidth. +There is a compromise between sensor noise and the influence of the sensor size on the system's design and on the control bandwidth. + +# Bibliography +Tjepkema, D., Dijk, J. v., & Soemers, H., *Sensor fusion for active vibration isolation in precision equipment*, Journal of Sound and Vibration, *331(4)*, 735–749 (2012). http://dx.doi.org/10.1016/j.jsv.2011.09.022 [↩](#ef30bc07c91e9d46a42198757dc610de) diff --git a/content/paper/wang12_autom_marker_full_field_hard.md b/content/paper/wang12_autom_marker_full_field_hard.md new file mode 100644 index 0000000..bdf06bc --- /dev/null +++ b/content/paper/wang12_autom_marker_full_field_hard.md @@ -0,0 +1,29 @@ ++++ +title = "Automated markerless full field hard x-ray microscopic tomography at sub-50 nm 3-dimension spatial resolution" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Nano Active Stabilization System]({{< relref "nano_active_stabilization_system" >}}) + +Reference +: (Jun Wang {\it et al.}, 2012) + +Author(s) +: Wang, J., Chen, Y. K., Yuan, Q., Tkachuk, A., Erdonmez, C., Hornberger, B., & Feser, M. 
+ +Year +: 2012 + +**Introduction of Markers**: +That limits the type of samples that is studied + +There is a need for markerless nano-tomography +=> the key requirement is the precision and stability of the positioning stages. + +**Passive rotational run-out error system**: +It uses calibrated metrology disc and capacitive sensors + +# Bibliography +Wang, J., Chen, Y. K., Yuan, Q., Tkachuk, A., Erdonmez, C., Hornberger, B., & Feser, M., *Automated markerless full field hard x-ray microscopic tomography at sub-50 nm 3-dimension spatial resolution*, Applied Physics Letters, *100(14)*, 143107 (2012). http://dx.doi.org/10.1063/1.3701579 [↩](#1bccbe15e35ed02229afbc6528c5057e) diff --git a/content/paper/wang16_inves_activ_vibrat_isolat_stewar.md b/content/paper/wang16_inves_activ_vibrat_isolat_stewar.md new file mode 100644 index 0000000..6bbeb70 --- /dev/null +++ b/content/paper/wang16_inves_activ_vibrat_isolat_stewar.md @@ -0,0 +1,57 @@ ++++ +title = "Investigation on active vibration isolation of a stewart platform with piezoelectric actuators" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Vibration Isolation]({{< relref "vibration_isolation" >}}), [Flexible Joints]({{< relref "flexible_joints" >}}) + +Reference +: (Wang {\it et al.}, 2016) + +Author(s) +: Wang, C., Xie, X., Chen, Y., & Zhang, Z. + +Year +: 2016 + +**Model of the Stewart platform**: + +- Struts are treated as flexible beams +- Payload and the base are treated as flexible plates +- The FRF synthesis method permits to derive FRFs of the Stewart platform + +The model is compared with a Finite Element model and is shown to give the same results. +The proposed model is thus effective. 
+ + + +{{< figure src="/ox-hugo/wang16_stewart_platform.png" caption="Figure 1: Stewart Platform" >}} + +**Control**: +Combines: + +- the FxLMS-based adaptive inverse control => suppress transmission of periodic vibrations +- direct feedback of integrated forces => dampen vibration of inherent modes and thus reduce random vibrations + +Force Feedback (Figure [2](#org4b1fbd9)). + +- the force sensor is mounted **between the base and the strut** + + + +{{< figure src="/ox-hugo/wang16_force_feedback.png" caption="Figure 2: Feedback of integrated forces in the platform" >}} + +Sorts of HAC-LAC control: + +- LAC: Decentralized integral force feedback +- HAC: Inertial control using accelerometers. Use of the Jacobian to decouple the motion and then Fx-LMS based adaptive control is used + +**Experimental validation**: + +- All 6 transfer function from actuator force to force sensors are almost the same (gain offset) +- Effectiveness of control methods are shown + +# Bibliography +Wang, C., Xie, X., Chen, Y., & Zhang, Z., *Investigation on active vibration isolation of a stewart platform with piezoelectric actuators*, Journal of Sound and Vibration, *383()*, 1–19 (2016). 
http://dx.doi.org/10.1016/j.jsv.2016.07.021 [↩](#db95fac7cd46c14e2b4f38e8ca4158fe) diff --git a/content/paper/yang19_dynam_model_decoup_contr_flexib.md b/content/paper/yang19_dynam_model_decoup_contr_flexib.md new file mode 100644 index 0000000..23a1942 --- /dev/null +++ b/content/paper/yang19_dynam_model_decoup_contr_flexib.md @@ -0,0 +1,137 @@ ++++ +title = "Dynamic modeling and decoupled control of a flexible stewart platform for vibration isolation" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Vibration Isolation]({{< relref "vibration_isolation" >}}), [Flexible Joints]({{< relref "flexible_joints" >}}), [Cubic Architecture]({{< relref "cubic_architecture" >}}) + +Reference +: (Yang {\it et al.}, 2019) + +Author(s) +: Yang, X., Wu, H., Chen, B., Kang, S., & Cheng, S. + +Year +: 2019 + +**Discusses**: + +- flexible-rigid model of Stewart platform +- the impact of joint stiffness is compensated using a displacement sensor and a force sensor +- then the MIMO system is decoupled in modal space and 6 SISO controllers are applied for vibration isolation using force sensors + +The joint stiffness impose a limitation on the control performance using force sensors as it adds a zero at low frequency in the dynamics. +Thus, this stiffness is taken into account in the dynamics and compensated for. 
+ +**Stewart platform** (Figure [1](#org936d8f9)): + +- piezoelectric actuators +- flexible joints (Figure [2](#orgd8c916a)) +- force sensors (used for vibration isolation) +- displacement sensors (used to decouple the dynamics) +- cubic (even though not said explicitly) + + + +{{< figure src="/ox-hugo/yang19_stewart_platform.png" caption="Figure 1: Stewart Platform" >}} + + + +{{< figure src="/ox-hugo/yang19_flexible_joints.png" caption="Figure 2: Flexible Joints" >}} + +The stiffness of the flexible joints (Figure [2](#orgd8c916a)) are computed with an FEM model and shown in Table [1](#table--tab:yang19-stiffness-flexible-joints). + + +
+ Table 1: + Stiffness of flexible joints obtained by FEM +
+ +| \\(k\_{\theta u},\ k\_{\psi u}\\) | \\(72 Nm/rad\\) | +|-----------------------------------|-----------------| +| \\(k\_{\theta s}\\) | \\(51 Nm/rad\\) | +| \\(k\_{\psi s}\\) | \\(62 Nm/rad\\) | +| \\(k\_{\gamma s}\\) | \\(64 Nm/rad\\) | + +**Dynamics**: +If the bending and torsional stiffness of the flexible joints are neglected: +\\[ M \ddot{x} + C \dot{x} + K x = J^T f \\] + +- \\(M\\) is the mass matrix +- \\(C\\) is the damping matrix +- \\(K\\) is the stiffness matrix +- \\(x\\) is the generalized coordinates, representing the displacement and orientation of the payload plate +- \\(f\\) is the actuator forces +- \\(J\\) is the Jacobian matrix + +In this paper, the parasitic bending stiffness of the flexible joints are considered: +\\[ M \ddot{x} + C \dot{x} + (K + K\_e) x = J^T f \\] +where \\(K\_e\\) is the stiffness matrix induced by the parasitic stiffness of the flexible joints. + +Analytical expression for \\(K\_e\\) are derived in the paper. + +**Controller Design**: +There is a strong coupling between the input forces and the state variables in the task space. +The traditional modal decoupled control strategy cannot work with the flexible Stewart platform because it is impossible to achieve simultaneous diagonalization of the mass, damped and stiffness matrices. + +To make the six-dof system decoupled into six single-dof isolators, a controller based on the leg's force and position feedback is designed. 
+ +> The idea is to synthesize the control force that can compensate the parasitic bending and torsional torques of the flexible joints and simultaneously achieve diagonalization of the matrices \\(M\\), \\(C\\) and \\(K\\) + +The force measured by the force sensors are: +\\[ y = f - k J x - c J \dot{x} \\] +The displacements measured by the position sensors are: +\\[ z = [\Delta l\_1\ \dots\ \Delta l\_6]^T \\] + +Let's apply the feedback control based on both the force sensor and the position sensor: +\\[ f = -H(s) y + (1 + H(s)) K\_{el} z \\] +where \\(K\_{el} = J^{-T} K\_e J^T\\) is the stiffness matrix of the flexible joints expressed in joint space. + +We thus obtain: +\\[ f = \frac{H(s)}{1 + H(s)} (k J x + c J \dot{x}) + J^{-T} K\_e x \\] + +If we substitute \\(f\\) in the dynamic equation, we obtain that the parasitic stiffness effect of the flexible joints has been compensated by the actuation forces and the system can now be decoupled in modal space \\(x = \Phi u\\). +\\(\Phi\\) is the modal matrix selected such that \\(\Phi^T M \Phi = I\_6\\) and \\(k \Phi^T J^T J \Phi = \text{diag}(\omega\_1^2\ \dots\ \omega\_6^2)\\): +\\[ s^2 + \frac{1}{1 + H(s)} \frac{c \omega\_i^2}{k} s + \frac{1}{1 + H(s)} \omega\_i^2 = 0, \quad i = 1,\ \dots,\ 6 \\] + +The six-dof system is now transformed into a six one-dof system where \\(H(s)\\) can be designed for control purpose. + +In order to apply this control strategy: + +- A force sensor and displacement sensor are need in each strut +- The joint stiffness has to be known +- The jacobian has to be computed +- No information about modal matrix is needed + +The block diagram of the control strategy is represented in Figure [3](#orgeb7080e). 
+ + + +{{< figure src="/ox-hugo/yang19_control_arch.png" caption="Figure 3: Control Architecture used" >}} + +\\(H(s)\\) is designed as a proportional plus integral compensator: +\\[ H(s) = k\_p + k\_i/s \\] + +Substituting \\(H(s)\\) in the equation of motion gives that: + +- an increase of \\(k\_i\\) increases the damping and thus suppresses the resonance peaks +- an increase of \\(k\_p\\) lowers the resonance frequency and thus the bandwidth of vibration isolation is expanded + +**Experimental Validation**: +An external Shaker is used to excite the base and accelerometers are located on the base and mobile platforms to measure their motion. +The results are shown in Figure [4](#org48c287d). +In theory, the vibration performance can be improved, however in practice, increasing the gain causes saturation of the piezoelectric actuators and then the instability occurs. + + + +{{< figure src="/ox-hugo/yang19_results.png" caption="Figure 4: Frequency response of the acceleration ratio between the payload and excitation (Transmissibility)" >}} + +> A model-based controller is then designed based on the leg’s force and position feedback. +> The position feedback compensates the effect of parasitic bending and torsional stiffness of the flexible joints. +> The force feedback makes the six-DOF MIMO system decoupled into six SISO subsystems in modal space, where the control gains can be designed and analyzed more effectively and conveniently. +> The proportional and integral gains in the sub-controller are used to separately regulate the vibration isolation bandwidth and active damping simultaneously for the six vibration modes. + +# Bibliography +Yang, X., Wu, H., Chen, B., Kang, S., & Cheng, S., *Dynamic modeling and decoupled control of a flexible stewart platform for vibration isolation*, Journal of Sound and Vibration, *439()*, 398–412 (2019). 
http://dx.doi.org/10.1016/j.jsv.2018.10.007 [↩](#d39b6222c8dd2baf188d677733c2826c) diff --git a/content/paper/zhang11_six_dof.md b/content/paper/zhang11_six_dof.md new file mode 100644 index 0000000..028597d --- /dev/null +++ b/content/paper/zhang11_six_dof.md @@ -0,0 +1,34 @@ ++++ +title = "Six dof active vibration control using stewart platform with non-cubic configuration" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Stewart Platforms]({{< relref "stewart_platforms" >}}), [Vibration Isolation]({{< relref "vibration_isolation" >}}) + +Reference +: (Zhen Zhang {\it et al.}, 2011) + +Author(s) +: Zhang, Z., Liu, J., Mao, J., Guo, Y., & Ma, Y. + +Year +: 2011 + +- **Non-cubic** stewart platform +- **Flexible** joints +- Magnetostrictive actuators +- Strong coupled motions along different axes +- Non-cubic architecture => permits to have larger workspace which was required +- Structure parameters (radius of plates, length of struts) are determined by optimization of the condition number of the Jacobian matrix +- **Accelerometers** for active isolation +- Adaptive FIR filters for active isolation control + + + +{{< figure src="/ox-hugo/zhang11_platform.png" caption="Figure 1: Prototype of the non-cubic stewart platform" >}} + +# Bibliography +Zhang, Z., Liu, J., Mao, J., Guo, Y., & Ma, Y., *Six dof active vibration control using stewart platform with non-cubic configuration*, In , 2011 6th IEEE Conference on Industrial Electronics and Applications (pp. ) (2011). : . 
[↩](#a457d4de462d2fe52a1bbb848182b554) diff --git a/content/websites/control_bootcamp.md b/content/websites/control_bootcamp.md new file mode 100644 index 0000000..5badb33 --- /dev/null +++ b/content/websites/control_bootcamp.md @@ -0,0 +1,126 @@ ++++ +title = "Control Bootcamp" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + + + + +## Overview {#overview} + + +## Linear Systems {#linear-systems} + + +## Stability and Eigenvalues {#stability-and-eigenvalues} + + +## Linearizing Around a Fixed Point {#linearizing-around-a-fixed-point} + + +## Controllability {#controllability} + + +## Controllability, Reachability, and Eigenvalue Placement {#controllability-reachability-and-eigenvalue-placement} + + +## Controllability and the Discrete-Time Impulse Response {#controllability-and-the-discrete-time-impulse-response} + + +## Degrees of Controllability and Gramians {#degrees-of-controllability-and-gramians} + + +## Controllability and the PBH Test {#controllability-and-the-pbh-test} + + +## Cayley-Hamilton Theorem {#cayley-hamilton-theorem} + + +## Reachability and Controllability with Cayley-Hamilton {#reachability-and-controllability-with-cayley-hamilton} + + +## Inverted Pendulum on a Cart {#inverted-pendulum-on-a-cart} + + +## Eigenvalue Placement for the Inverted Pendulum on a Cart {#eigenvalue-placement-for-the-inverted-pendulum-on-a-cart} + + +## Linear Quadratic Regulator (LQR) Control for the Inverted Pendulum on a Cart {#linear-quadratic-regulator--lqr--control-for-the-inverted-pendulum-on-a-cart} + + +## Motivation for Full-State Estimation {#motivation-for-full-state-estimation} + + +## Observability {#observability} + + +## Full-State Estimation {#full-state-estimation} + + +## Kalman Filter {#kalman-filter} + + +## Observability Example in Matlab {#observability-example-in-matlab} + + +## Observability Example in Matlab (Part 2) {#observability-example-in-matlab--part-2} + + +## Kalman Filter Example in Matlab 
{#kalman-filter-example-in-matlab} + + +## Linear Quadratic Gaussian (LQG) {#linear-quadratic-gaussian--lqg} + + +## LQG Example in Matlab {#lqg-example-in-matlab} + + +## Introduction to Robust Control {#introduction-to-robust-control} + + +## Three Equivalent Representations of Linear Systems {#three-equivalent-representations-of-linear-systems} + + +## Example Frequency Response (Bode Plot) for Spring-Mass-Damper {#example-frequency-response--bode-plot--for-spring-mass-damper} + + +## Laplace Transforms and the Transfer Function {#laplace-transforms-and-the-transfer-function} + + +## Benefits of Feedback on Cruise Control Example {#benefits-of-feedback-on-cruise-control-example} + + +## Benefits of Feedback on Cruise Control Example (Part 2) {#benefits-of-feedback-on-cruise-control-example--part-2} + + +## Cruise Control Example with Proportional-Integral (PI) control {#cruise-control-example-with-proportional-integral--pi--control} + + +## Sensitivity and Complementary Sensitivity {#sensitivity-and-complementary-sensitivity} + + +## Sensitivity and Complementary Sensitivity (Part 2) {#sensitivity-and-complementary-sensitivity--part-2} + + +## Loop shaping {#loop-shaping} + + +## Loop Shaping Example for Cruise Control {#loop-shaping-example-for-cruise-control} + + +## Sensitivity and Robustness {#sensitivity-and-robustness} + + +## Limitations on Robustness {#limitations-on-robustness} + + +## Cautionary Tale About Inverting the Plant Dynamics {#cautionary-tale-about-inverting-the-plant-dynamics} + + +## Control systems with non-minimum phase dynamics {#control-systems-with-non-minimum-phase-dynamics} + +<./biblio/references.bib> diff --git a/content/websites/data_driven_dynamical_systems_with_machine_learning.md b/content/websites/data_driven_dynamical_systems_with_machine_learning.md new file mode 100644 index 0000000..617e125 --- /dev/null +++ b/content/websites/data_driven_dynamical_systems_with_machine_learning.md @@ -0,0 +1,183 @@ ++++ +title = 
"Data-Driven Dynamical Systems with Machine Learning" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + + +## Data-Driven Control {#data-driven-control} + + +### Overview {#overview} + + +#### Challenges {#challenges} + +With modern control (LQR, LQG, H-Infinity), we work with linear system (or linearized systems) and we develop a control law that minimize some cost function. + +Challenging systems where modern control is not efficient: + +- Non-linear systems +- System with unknown dynamics +- High dimensional systems +- Limited measurements or control inputs + +For these kinds of systems, data-driven control seems to be a good alternative. + + +#### What is control? {#what-is-control} + +It's an optimization constrained by dynamics of the system. + +The optimization is easy when the system is linear and the cost function is quadratic. +When the system is non-linear or when the cost function is non quadratic, the optimization becomes complicated (non closed form). Then the optimization should be rerun on the fly, which is what is done with MPC (model predictive control). + + +#### What is Machine-Learning? {#what-is-machine-learning} + +Machine-learning is powerful non-linear optimization based on data. + + +#### Outline of this lecture {#outline-of-this-lecture} + + +##### Data Driven Models {#data-driven-models} + +For the problem of unknown dynamics, we can use data driven models. +The goal is to collect data to generate of model of the system. + + +##### Machine Learning Control {#machine-learning-control} + +When we use the control inputs, the system changes and the system model might be not valid anymore. +The idea is to use data driven machine learning directly to learn a good controller. + + +##### Sensor and actuator placement {#sensor-and-actuator-placement} + +Use powerful optimization techniques from machine learning to learn what are good sensors and actuators. 
+ + +### Linear System Identification {#linear-system-identification} + + +### The Goal of Balanced Model Reduction {#the-goal-of-balanced-model-reduction} + + +### Change of Variables in Control Systems {#change-of-variables-in-control-systems} + + +### Change of Variables in Control Systems (Correction) {#change-of-variables-in-control-systems--correction} + + +### Balancing Example {#balancing-example} + + +### Balancing Transformation {#balancing-transformation} + + +### Balanced Truncation {#balanced-truncation} + + +### Balanced Truncation Example {#balanced-truncation-example} + + +### Error Bounds for Balanced Truncation {#error-bounds-for-balanced-truncation} + + +### Balanced Proper Orthogonal Decomposition {#balanced-proper-orthogonal-decomposition} + + +### BPOD and Output Projection {#bpod-and-output-projection} + + +### Balanced Truncation and BPOD Example {#balanced-truncation-and-bpod-example} + + +### Eigensystem Realization Algorithm {#eigensystem-realization-algorithm} + + +### ERA and the Discrete-Time Impulse Response {#era-and-the-discrete-time-impulse-response} + + +### Eigensystem Realization Algorithm Procedure {#eigensystem-realization-algorithm-procedure} + + +### Balanced Models with ERA {#balanced-models-with-era} + + +### Observer Kalman Filter Identification {#observer-kalman-filter-identification} + + +### ERA\_OKID Example in Matlab {#era-okid-example-in-matlab} + + +## System Identification {#system-identification} + + +### Full-State Models with Control {#full-state-models-with-control} + + +### Regression Models {#regression-models} + + +### Dynamic Mode Decomposition with Control {#dynamic-mode-decomposition-with-control} + + +### DMD Control Example {#dmd-control-example} + + +### Koopman with Control {#koopman-with-control} + + +### Sparse Nonlinear Models with Control {#sparse-nonlinear-models-with-control} + + +### Model Predictive Control {#model-predictive-control} + + +### Sparse Identification of Nonlinear Dynamics for 
Model Predictive Control {#sparse-identification-of-nonlinear-dynamics-for-model-predictive-control} + + +## Machine Learning Control {#machine-learning-control} + + +### Overview {#overview} + + +### Genetic Algorithms {#genetic-algorithms} + + +### Tuning a PID Controller with Genetic Algorithms {#tuning-a-pid-controller-with-genetic-algorithms} + + +### Tuning a PID Controller with Genetic Algorithms (Part 2) {#tuning-a-pid-controller-with-genetic-algorithms--part-2} + + +### Genetic Programming {#genetic-programming} + + +### Genetic Programming Control {#genetic-programming-control} + + +## Extremum Seeking Control {#extremum-seeking-control} + + +### Introduction {#introduction} + + +### Matlab {#matlab} + + +### Simulink {#simulink} + + +### Challenging Example {#challenging-example} + + +### Applications {#applications} + +<./biblio/references.bib> diff --git a/content/zettels/_index.md b/content/zettels/_index.md new file mode 100644 index 0000000..c2a149d --- /dev/null +++ b/content/zettels/_index.md @@ -0,0 +1,8 @@ ++++ +title = "Zettels" +author = ["Thomas Dehaeze"] +type = "zettels" +draft = false ++++ + +Here is the list of subjects I took note about. 
diff --git a/content/zettels/active_damping.md b/content/zettels/active_damping.md new file mode 100644 index 0000000..3397a75 --- /dev/null +++ b/content/zettels/active_damping.md @@ -0,0 +1,16 @@ ++++ +title = "Active Damping" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [Active isolation and damping of vibrations via stewart platform]({{< relref "hanieh03_activ_stewar" >}}) +- [Active damping based on decoupled collocated control]({{< relref "holterman05_activ_dampin_based_decoup_colloc_contr" >}}) diff --git a/content/zettels/actuators.md b/content/zettels/actuators.md new file mode 100644 index 0000000..742b807 --- /dev/null +++ b/content/zettels/actuators.md @@ -0,0 +1,72 @@ ++++ +title = "Actuators" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + + +## How to choose the correct actuator for my application? {#how-to-choose-the-correct-actuator-for-my-application} + +For vibration isolation: + +- In (Shingo Ito \& Georg Schitter, 2016), the effect of the actuator stiffness on the attainable vibration isolation is studied ([Notes]({{< relref "ito16_compar_class_high_precis_actuat" >}})) + + +## Piezoelectric {#piezoelectric} + +| Suppliers | Links | +|--------------|------------------------------------------------------------------------------------| +| Cedrat | [link](http://www.cedrat-technologies.com/) | +| PI | [link](https://www.physikinstrumente.com/en/) | +| Piezo System | [link](https://www.piezosystem.com/products/piezo%5Factuators/stacktypeactuators/) | +| Noliac | [link](http://www.noliac.com/) | + +A model of a multi-layer monolithic piezoelectric stack actuator is described in (Fleming, 2010) ([Notes]({{< relref "fleming10_nanop_system_with_force_feedb" >}})). 
+ + +## Voice Coil {#voice-coil} + +| Suppliers | Links | +|----------------------|----------------------------------------------| +| Geeplus | [link](https://www.geeplus.com/) | +| Maccon | [link](https://www.maccon.de/en.html) | +| TDS PP | [link](https://www.tds-pp.com/en/) | +| H2tech | [link](https://www.h2wtech.com/) | +| PBA Systems | [link](http://www.pbasystems.com.sg/) | +| Celera Motion | [link](https://www.celeramotion.com/) | +| Beikimco | [link](http://www.beikimco.com/) | +| Electromate | [link](https://www.electromate.com/) | +| Magnetic Innovations | [link](https://www.magneticinnovations.com/) | + + +## Shaker {#shaker} + +| Suppliers | Links | +|--------------------|---------------------------------------------------------------| +| BKSV | [link](https://www.bksv.com/en/products/shakers-and-exciters) | +| Vibration Research | [link](https://vibrationresearch.com/shakers/) | +| Sentek Dynamics | [link](https://www.sentekdynamics.com/) | + + + + +## Brush-less DC Motor {#brush-less-dc-motor} + +- (Yedamale, 2003) + + + +# Bibliography +Ito, S., & Schitter, G., *Comparison and classification of high-precision actuators based on stiffness influencing vibration isolation*, IEEE/ASME Transactions on Mechatronics, *21(2)*, 1169–1178 (2016). http://dx.doi.org/10.1109/tmech.2015.2478658 [↩](#aad53368e29e8a519e2f63857044fa46) + +Fleming, A., *Nanopositioning system with force feedback for high-performance tracking and vibration control*, IEEE/ASME Transactions on Mechatronics, *15(3)*, 433–447 (2010). http://dx.doi.org/10.1109/tmech.2009.2028422 [↩](#c823f68dd2a72b9667a61b3c046b4731) + +Yedamale, P., *Brushless dc (bldc) motor fundamentals*, Microchip Technology Inc, *20()*, 3–15 (2003). 
[↩](#d2e68d39d09d7e8e71ff08a6ebd45400) + + +## Backlinks {#backlinks} + +- [Comparison and classification of high-precision actuators based on stiffness influencing vibration isolation]({{< relref "ito16_compar_class_high_precis_actuat" >}}) diff --git a/content/zettels/complementary_filters.md b/content/zettels/complementary_filters.md new file mode 100644 index 0000000..ceaf8e9 --- /dev/null +++ b/content/zettels/complementary_filters.md @@ -0,0 +1,15 @@ ++++ +title = "Complementary Filters" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [Advances in internal model control technique: a review and future prospects]({{< relref "saxena12_advan_inter_model_contr_techn" >}}) diff --git a/content/zettels/cubic_architecture.md b/content/zettels/cubic_architecture.md new file mode 100644 index 0000000..a8d62bc --- /dev/null +++ b/content/zettels/cubic_architecture.md @@ -0,0 +1,17 @@ ++++ +title = "Cubic Architecture" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [Sensors and control of a space-based six-axis vibration isolation system]({{< relref "hauge04_sensor_contr_space_based_six" >}}) +- [Dynamic modeling and decoupled control of a flexible stewart platform for vibration isolation]({{< relref "yang19_dynam_model_decoup_contr_flexib" >}}) +- [Simultaneous, fault-tolerant vibration isolation and pointing control of flexure jointed hexapods]({{< relref "li01_simul_fault_vibrat_isolat_point" >}}) diff --git a/content/zettels/electronics.md b/content/zettels/electronics.md new file mode 100644 index 0000000..e2b1958 --- /dev/null +++ b/content/zettels/electronics.md @@ -0,0 +1,15 @@ ++++ +title = "Electronics" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [The art of electronics - third edition]({{< relref 
"horowitz15_art_of_elect_third_edition" >}}) diff --git a/content/zettels/flexible_joints.md b/content/zettels/flexible_joints.md new file mode 100644 index 0000000..44257bc --- /dev/null +++ b/content/zettels/flexible_joints.md @@ -0,0 +1,21 @@ ++++ +title = "Flexible Joints" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [A six-axis single-stage active vibration isolator based on stewart platform]({{< relref "preumont07_six_axis_singl_stage_activ" >}}) +- [Nanometre-cutting machine using a stewart-platform parallel mechanism]({{< relref "furutani04_nanom_cuttin_machin_using_stewar" >}}) +- [Dynamic modeling and experimental analyses of stewart platform with flexible hinges]({{< relref "jiao18_dynam_model_exper_analy_stewar" >}}) +- [Dynamic modeling and decoupled control of a flexible stewart platform for vibration isolation]({{< relref "yang19_dynam_model_decoup_contr_flexib" >}}) +- [Simultaneous, fault-tolerant vibration isolation and pointing control of flexure jointed hexapods]({{< relref "li01_simul_fault_vibrat_isolat_point" >}}) +- [Investigation on active vibration isolation of a stewart platform with piezoelectric actuators]({{< relref "wang16_inves_activ_vibrat_isolat_stewar" >}}) +- [Identification and decoupling control of flexure jointed hexapods]({{< relref "chen00_ident_decoup_contr_flexur_joint_hexap" >}}) diff --git a/content/zettels/force_sensors.md b/content/zettels/force_sensors.md new file mode 100644 index 0000000..48177a6 --- /dev/null +++ b/content/zettels/force_sensors.md @@ -0,0 +1,23 @@ ++++ +title = "Force Sensors" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + + +## Suppliers {#suppliers} + +| | | +|-----|---------------------------------------------------------------| +| PCB | [link](https://www.pcb.com/products/productfinder.aspx?tx=17) | + + +## Dynamics and Noise of a piezoelectric force sensor 
{#dynamics-and-noise-of-a-piezoelectric-force-sensor} + +An analysis of the dynamics and noise of a piezoelectric force sensor is done in (Fleming, 2010) ([Notes]({{< relref "fleming10_nanop_system_with_force_feedb" >}})). + +# Bibliography +Fleming, A., *Nanopositioning system with force feedback for high-performance tracking and vibration control*, IEEE/ASME Transactions on Mechatronics, *15(3)*, 433–447 (2010). http://dx.doi.org/10.1109/tmech.2009.2028422 [↩](#c823f68dd2a72b9667a61b3c046b4731) diff --git a/content/zettels/h_infinity_control.md b/content/zettels/h_infinity_control.md new file mode 100644 index 0000000..945ce8c --- /dev/null +++ b/content/zettels/h_infinity_control.md @@ -0,0 +1,24 @@ ++++ +title = "H Infinity Control" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + + +## Nice Citations {#nice-citations} + +From _Rosenbrock, H. H. (1974). Computer-Aided Control System Design, Academic Press, New York_: + +> Solutions are constrained by so many requirements that it is virtually impossible to list them all. +> The designer finds himself threading a maze of such requirements, attempting to reconcile conflicting demands of cost, performance, easy maintenance, and so on. +> A good design usually has strong aesthetic appeal to those who are competent in the subject. 
+ +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [Guidelines for the selection of weighting functions for h-infinity control]({{< relref "bibel92_guidel_h" >}}) diff --git a/content/zettels/inertial_sensors.md b/content/zettels/inertial_sensors.md new file mode 100644 index 0000000..aa403e7 --- /dev/null +++ b/content/zettels/inertial_sensors.md @@ -0,0 +1,47 @@ ++++ +title = "Inertial Sensors" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: [Position Sensors]({{< relref "position_sensors" >}}) + + +## Reviews {#reviews} + +- (Collette {\it et al.}, 2012) + + +## Accelerometers {#accelerometers} + +| | | +|--------------------|---------------------------------------------------------------| +| Micromega Dynamics | [link](https://micromega-dynamics.com/products/) | +| MMF | [link](https://www.mmf.de/seismic%5Faccelerometers.htm) | +| PCB | [link](https://www.pcb.com/products/productfinder.aspx?tx=14) | + +Wireless Accelerometers + +- + + + +{{< figure src="/ox-hugo/inertial_sensors_characteristics_accelerometers.png" caption="Figure 1: Characteristics of commercially available accelerometers (Collette {\it et al.}, 2011)" >}} + + +## Geophones {#geophones} + +| | | +|----------|----------------------------------------------------------------| +| Sercel | [link](http://www.sercel.com/products/Pages/seismometers.aspx) | +| Wilcoxon | [link](https://wilcoxon.com/) | + + + +{{< figure src="/ox-hugo/inertial_sensors_characteristics_geophone.png" caption="Figure 2: Characteristics of commercially available geophones (Collette {\it et al.}, 2011)" >}} + +# Bibliography +Collette, C., Janssens, S., Fernandez-Carmona, P., Artoos, K., Guinchard, M., Hauviller, C., & Preumont, A., *Review: inertial sensors for low-frequency seismic vibration measurement*, Bulletin of the Seismological Society of America, *102(4)*, 1289–1300 (2012). 
http://dx.doi.org/10.1785/0120110223 [↩](#dd5109075933cf543c7eba0979c0ba50) + +Collette, C., Artoos, K., Guinchard, M., Janssens, S., Carmona Fernandez, P., & Hauviller, C., *Review of sensors for low frequency seismic vibration measurement* (2011). [↩](#642a18d86de4e062c6afb0f5f20501c4) diff --git a/content/zettels/metrology.md b/content/zettels/metrology.md new file mode 100644 index 0000000..2375c1f --- /dev/null +++ b/content/zettels/metrology.md @@ -0,0 +1,15 @@ ++++ +title = "Metrology" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [Fundamental principles of engineering nanometrology]({{< relref "leach14_fundam_princ_engin_nanom" >}}) diff --git a/content/zettels/motion_control.md b/content/zettels/motion_control.md new file mode 100644 index 0000000..c4c489c --- /dev/null +++ b/content/zettels/motion_control.md @@ -0,0 +1,15 @@ ++++ +title = "Motion Control" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [Advanced motion control for precision mechatronics: control, identification, and learning of complex systems]({{< relref "oomen18_advan_motion_contr_precis_mechat" >}}) diff --git a/content/zettels/multivariable_control.md b/content/zettels/multivariable_control.md new file mode 100644 index 0000000..dec055f --- /dev/null +++ b/content/zettels/multivariable_control.md @@ -0,0 +1,19 @@ ++++ +title = "Multivariable Control" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [Multivariable feedback control: analysis and design]({{< relref "skogestad07_multiv_feedb_contr" >}}) +- [Multivariable control systems: an engineering approach]({{< relref "albertos04_multiv_contr_system" >}}) +- [Position control in lithographic equipment]({{< relref "butler11_posit_contr_lithog_equip" >}}) +- [Implementation challenges for multivariable 
control: what you did not learn in school!]({{< relref "garg07_implem_chall_multiv_contr" >}}) +- [Simultaneous, fault-tolerant vibration isolation and pointing control of flexure jointed hexapods]({{< relref "li01_simul_fault_vibrat_isolat_point" >}}) diff --git a/content/zettels/nano_active_stabilization_system.md b/content/zettels/nano_active_stabilization_system.md new file mode 100644 index 0000000..622ad02 --- /dev/null +++ b/content/zettels/nano_active_stabilization_system.md @@ -0,0 +1,17 @@ ++++ +title = "Nano Active Stabilization System" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [Interferometric characterization of rotation stages for x-ray nanotomography]({{< relref "stankevic17_inter_charac_rotat_stages_x_ray_nanot" >}}) +- [Automated markerless full field hard x-ray microscopic tomography at sub-50 nm 3-dimension spatial resolution]({{< relref "wang12_autom_marker_full_field_hard" >}}) +- [An instrument for 3d x-ray nano-imaging]({{< relref "holler12_instr_x_ray_nano_imagin" >}}) diff --git a/content/zettels/position_sensors.md b/content/zettels/position_sensors.md new file mode 100644 index 0000000..be16bf1 --- /dev/null +++ b/content/zettels/position_sensors.md @@ -0,0 +1,115 @@ ++++ +title = "Position Sensors" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + + +## Reviews of position sensors {#reviews-of-position-sensors} + +- (Collette {\it et al.}, 2012) +- (Andrew Fleming, 2013) + + +## Relative Position Sensors {#relative-position-sensors} + + + +{{< figure src="/ox-hugo/position_sensor_characteristics_relative_sensor.png" caption="Figure 1: Characteristics of relative measurement sensors (Collette {\it et al.}, 2011)" >}} + + + +{{< figure src="/ox-hugo/position_sensor_characteristics.png" caption="Figure 2: Position sensor characteristics (Andrew Fleming, 2013)" >}} + + +### Strain Gauge {#strain-gauge} + + +### Capacitive Sensor 
{#capacitive-sensor} + +Description: + +- +- + +| | | +|----------------|-------------------------------------------------------------------------------------------------| +| Micro Sense | [link](http://www.microsense.net/products-position-sensors.htm) | +| Micro-Epsilon | [link](https://www.micro-epsilon.com/displacement-position-sensors/capacitive-sensor/) | +| PI | [link](https://www.physikinstrumente.com/en/technology/sensor-technologies/capacitive-sensors/) | +| Unipulse | [link](https://www.unipulse.com/product/ps-ia/) | +| Lion-Precision | [link](https://www.lionprecision.com/products/capacitive-sensors) | + + +### Inductive Sensor (Eddy Current) {#inductive-sensor--eddy-current} + +| | | +|----------------|------------------------------------------------------------------------------------------| +| Micro-Epsilon | [link](https://www.micro-epsilon.com/displacement-position-sensors/eddy-current-sensor/) | +| Lion Precision | [link](https://www.lionprecision.com/products/eddy-current-sensors) | + + +### Inductive Sensor (LVDT) {#inductive-sensor--lvdt} + +| | | +|---------------|--------------------------------------------------------------------------------------------| +| Micro-Epsilon | [link](https://www.micro-epsilon.com/displacement-position-sensors/inductive-sensor-lvdt/) | +| Keyence | [link](https://www.keyence.eu/products/measure/contact-distance-lvdt/gt2/index.jsp) | + + +### Interferometers {#interferometers} + +| | | +|----------|----------------------------------------------------------------------------------------------------------| +| Attocube | [link](http://www.attocube.com/) | +| Zygo | [link](https://www.zygo.com/?/met/markets/stageposition/zmi/) | +| Smaract | [link](https://www.smaract.com/interferometry) | +| Qutools | [link](https://www.qutools.com/qudis/) | +| Renishaw | [link](https://www.renishaw.com/en/fibre-optic-laser-encoder-products--6594) | +| Sios | 
[link](https://sios-de.com/products/length-measurement/laser-interferometer/) | +| Keysight | [link](https://www.keysight.com/en/pc-1000000393%3Aepsg%3Apgr/laser-heads?nid=-536900395.0&cc=FR&lc=fre) | + +
+ Table 1: + Characteristics of Environmental Units +
+ +| | Temperature (\\(\pm\ ^oC\\)) | Pressure (\\(\pm\ hPa\\)) | Humidity \\(\pm\ \% RH\\) | Wavelength Accuracy (\\(\pm\ \text{ppm}\\)) | +|-----------|------------------------------|---------------------------|---------------------------|---------------------------------------------| +| Attocube | 0.1 | 1 | 2 | 0.5 | +| Renishaw | 0.2 | 1 | 6 | 1 | +| Picoscale | 0.2 | 2 | 2 | 1 | + +Figure [3](#orgce716a4) is taken from +(Yoon-Soo Jang \& Seung-Woo Kim, 2017). + + + +{{< figure src="/ox-hugo/position_sensor_interferometer_precision.png" caption="Figure 3: Expected precision of interferometer as a function of measured distance" >}} + + +### Fiber Optic Displacement Sensor {#fiber-optic-displacement-sensor} + +| | | +|----------|----------------------------------------------------| +| Unipulse | [link](https://www.unipulse.com/product/atw200-2/) | + +# Bibliography +Collette, C., Janssens, S., Mokrani, B., Fueyo-Roza, L., Artoos, K., Esposito, M., Fernandez-Carmona, P., …, *Comparison of new absolute displacement sensors*, In , International Conference on Noise and Vibration Engineering (ISMA) (pp. ) (2012). : . [↩](#0b0b67de6dddc4d28031ab2d3b28cd3d) + +Fleming, A. J., *A review of nanometer resolution position sensors: operation and performance*, Sensors and Actuators A: Physical, *190(nil)*, 106–126 (2013). http://dx.doi.org/10.1016/j.sna.2012.10.016 [↩](#3fb5b61524290e36d639a4fac65703d0) + +Collette, C., Artoos, K., Guinchard, M., Janssens, S., Carmona Fernandez, P., & Hauviller, C., *Review of sensors for low frequency seismic vibration measurement* (2011). [↩](#642a18d86de4e062c6afb0f5f20501c4) + +Jang, Y., & Kim, S., *Compensation of the refractive index of air in laser interferometer for distance measurement: a review*, International Journal of Precision Engineering and Manufacturing, *18(12)*, 1881–1890 (2017). 
http://dx.doi.org/10.1007/s12541-017-0217-y [↩](#7658b1219a4458a62ae8c6f51b767542) + + +## Backlinks {#backlinks} + +- [A review of nanometer resolution position sensors: operation and performance]({{< relref "fleming13_review_nanom_resol_posit_sensor" >}}) +- [Measurement technologies for precision positioning]({{< relref "gao15_measur_techn_precis_posit" >}}) diff --git a/content/zettels/positioning_stations.md b/content/zettels/positioning_stations.md new file mode 100644 index 0000000..06850b2 --- /dev/null +++ b/content/zettels/positioning_stations.md @@ -0,0 +1,17 @@ ++++ +title = "Positioning Stations" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [Interferometric characterization of rotation stages for x-ray nanotomography]({{< relref "stankevic17_inter_charac_rotat_stages_x_ray_nanot" >}}) +- [Position control in lithographic equipment]({{< relref "butler11_posit_contr_lithog_equip" >}}) +- [An instrument for 3d x-ray nano-imaging]({{< relref "holler12_instr_x_ray_nano_imagin" >}}) diff --git a/content/zettels/precision_engineering.md b/content/zettels/precision_engineering.md new file mode 100644 index 0000000..5880820 --- /dev/null +++ b/content/zettels/precision_engineering.md @@ -0,0 +1,16 @@ ++++ +title = "Precision Engineering" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [Design for precision: current status and trends]({{< relref "schellekens98_desig_precis" >}}) +- [Basics of precision engineering - 1st edition]({{< relref "leach18_basic_precis_engin_edition" >}}) diff --git a/content/zettels/reference_books.md b/content/zettels/reference_books.md new file mode 100644 index 0000000..dace8c7 --- /dev/null +++ b/content/zettels/reference_books.md @@ -0,0 +1,20 @@ ++++ +title = "Reference Books" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## 
Backlinks {#backlinks} + +- [Multivariable feedback control: analysis and design]({{< relref "skogestad07_multiv_feedb_contr" >}}) +- [The design of high performance mechatronics - 2nd revised edition]({{< relref "schmidt14_desig_high_perfor_mechat_revis_edition" >}}) +- [Parallel robots : mechanics and control]({{< relref "taghirad13_paral" >}}) +- [Modal testing: theory, practice and application]({{< relref "ewins00_modal" >}}) +- [The art of electronics - third edition]({{< relref "horowitz15_art_of_elect_third_edition" >}}) +- [Vibration Control of Active Structures - Fourth Edition]({{< relref "preumont18_vibrat_contr_activ_struc_fourt_edition" >}}) diff --git a/content/zettels/sensor_fusion.md b/content/zettels/sensor_fusion.md new file mode 100644 index 0000000..25136d7 --- /dev/null +++ b/content/zettels/sensor_fusion.md @@ -0,0 +1,19 @@ ++++ +title = "Sensor Fusion" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [Sensor fusion for active vibration isolation in precision equipment]({{< relref "tjepkema12_sensor_fusion_activ_vibrat_isolat_precis_equip" >}}) +- [Vibration control of flexible structures using fusion of inertial sensors and hyper-stable actuator-sensor pairs]({{< relref "collette14_vibrat" >}}) +- [Sensor fusion methods for high performance active vibration isolation systems]({{< relref "collette15_sensor_fusion_method_high_perfor" >}}) +- [Nanopositioning system with force feedback for high-performance tracking and vibration control]({{< relref "fleming10_nanop_system_with_force_feedb" >}}) +- [Nanopositioning with multiple sensors: a case study in data storage]({{< relref "sebastian12_nanop_with_multip_sensor" >}}) diff --git a/content/zettels/stewart_platforms.md b/content/zettels/stewart_platforms.md new file mode 100644 index 0000000..4061a45 --- /dev/null +++ b/content/zettels/stewart_platforms.md @@ -0,0 +1,34 @@ ++++ +title = "Stewart Platforms" +author = 
["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [Six dof active vibration control using stewart platform with non-cubic configuration]({{< relref "zhang11_six_dof" >}}) +- [Decentralized vibration control of a voice coil motor-based stewart parallel mechanism: simulation and experiments]({{< relref "tang18_decen_vibrat_contr_voice_coil" >}}) +- [Dynamic modeling and decoupled control of a flexible stewart platform for vibration isolation]({{< relref "yang19_dynam_model_decoup_contr_flexib" >}}) +- [Parallel robots : mechanics and control]({{< relref "taghirad13_paral" >}}) +- [Investigation on active vibration isolation of a stewart platform with piezoelectric actuators]({{< relref "wang16_inves_activ_vibrat_isolat_stewar" >}}) +- [Identification and decoupling control of flexure jointed hexapods]({{< relref "chen00_ident_decoup_contr_flexur_joint_hexap" >}}) +- [The stewart platform manipulator: a review]({{< relref "dasgupta00_stewar_platf_manip" >}}) +- [Modeling and control of vibration in mechanical systems]({{< relref "du10_model_contr_vibrat_mechan_system" >}}) +- [Studies on stewart platform manipulator: a review]({{< relref "furqan17_studies_stewar_platf_manip" >}}) +- [Nanometre-cutting machine using a stewart-platform parallel mechanism]({{< relref "furutani04_nanom_cuttin_machin_using_stewar" >}}) +- [An intelligent control system for multiple degree-of-freedom vibration isolation]({{< relref "geng95_intel_contr_system_multip_degree" >}}) +- [Active isolation and damping of vibrations via stewart platform]({{< relref "hanieh03_activ_stewar" >}}) +- [Sensors and control of a space-based six-axis vibration isolation system]({{< relref "hauge04_sensor_contr_space_based_six" >}}) +- [Dynamic modeling and experimental analyses of stewart platform with flexible hinges]({{< relref "jiao18_dynam_model_exper_analy_stewar" >}}) +- [Simultaneous, fault-tolerant vibration isolation and pointing 
control of flexure jointed hexapods]({{< relref "li01_simul_fault_vibrat_isolat_point" >}}) +- [A new isotropic and decoupled 6-dof parallel manipulator]({{< relref "legnani12_new_isotr_decoup_paral_manip" >}}) +- [Simultaneous vibration isolation and pointing control of flexure jointed hexapods]({{< relref "li01_simul_vibrat_isolat_point_contr" >}}) +- [A six-axis single-stage active vibration isolator based on stewart platform]({{< relref "preumont07_six_axis_singl_stage_activ" >}}) +- [Vibration Control of Active Structures - Fourth Edition]({{< relref "preumont18_vibrat_contr_activ_struc_fourt_edition" >}}) +- [A soft 6-axis active vibration isolator]({{< relref "spanos95_soft_activ_vibrat_isolat" >}}) diff --git a/content/zettels/system_identification.md b/content/zettels/system_identification.md new file mode 100644 index 0000000..3a26238 --- /dev/null +++ b/content/zettels/system_identification.md @@ -0,0 +1,15 @@ ++++ +title = "System Identification" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [Modal testing: theory, practice and application]({{< relref "ewins00_modal" >}}) diff --git a/content/zettels/test.md b/content/zettels/test.md new file mode 100644 index 0000000..93e6581 --- /dev/null +++ b/content/zettels/test.md @@ -0,0 +1,103 @@ ++++ +title = "Test File" +author = ["Thomas Dehaeze"] +draft = false ++++ + +> This is a quote! + +```matlab +a = 2; +figure; +``` + +
+
+ +This is an important part of the text. + +
+ +See Eq. [eq:test1](#eq:test1) and [eq:test2](#eq:test2). + +\begin{equation} + a = 1 +\end{equation} + +\begin{equation} + a = 2 \label{eq:test2} +\end{equation} + +Also look at [1](#org7280632) \eqref{eq:test2}. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. + +Some text. diff --git a/content/zettels/vibration_isolation.md b/content/zettels/vibration_isolation.md new file mode 100644 index 0000000..b231f3a --- /dev/null +++ b/content/zettels/vibration_isolation.md @@ -0,0 +1,33 @@ ++++ +title = "Vibration Isolation" +author = ["Thomas Dehaeze"] +draft = false ++++ + +Tags +: + +<./biblio/references.bib> + + +## Backlinks {#backlinks} + +- [Six dof active vibration control using stewart platform with non-cubic configuration]({{< relref "zhang11_six_dof" >}}) +- [Dynamic modeling and decoupled control of a flexible stewart platform for vibration isolation]({{< relref "yang19_dynam_model_decoup_contr_flexib" >}}) +- [Investigation on active vibration isolation of a stewart platform with piezoelectric actuators]({{< relref "wang16_inves_activ_vibrat_isolat_stewar" >}}) +- [Review of active vibration isolation strategies]({{< relref "collette11_review_activ_vibrat_isolat_strat" >}}) +- [Vibration control of flexible structures using fusion of inertial sensors and hyper-stable actuator-sensor pairs]({{< relref "collette14_vibrat" >}}) +- [Sensor fusion methods for high performance active vibration isolation systems]({{< relref "collette15_sensor_fusion_method_high_perfor" >}}) +- [Modeling and control of vibration in mechanical 
systems]({{< relref "du10_model_contr_vibrat_mechan_system" >}}) +- [An intelligent control system for multiple degree-of-freedom vibration isolation]({{< relref "geng95_intel_contr_system_multip_degree" >}}) +- [Active isolation and damping of vibrations via stewart platform]({{< relref "hanieh03_activ_stewar" >}}) +- [Sensors and control of a space-based six-axis vibration isolation system]({{< relref "hauge04_sensor_contr_space_based_six" >}}) +- [Comparison and classification of high-precision actuators based on stiffness influencing vibration isolation]({{< relref "ito16_compar_class_high_precis_actuat" >}}) +- [Simultaneous, fault-tolerant vibration isolation and pointing control of flexure jointed hexapods]({{< relref "li01_simul_fault_vibrat_isolat_point" >}}) +- [Simultaneous vibration isolation and pointing control of flexure jointed hexapods]({{< relref "li01_simul_vibrat_isolat_point_contr" >}}) +- [An exploration of active hard mount vibration isolation for precision equipment]({{< relref "poel10_explor_activ_hard_mount_vibrat" >}}) +- [Force feedback versus acceleration feedback in active vibration isolation]({{< relref "preumont02_force_feedb_versus_accel_feedb" >}}) +- [A six-axis single-stage active vibration isolator based on stewart platform]({{< relref "preumont07_six_axis_singl_stage_activ" >}}) +- [Vibration Control of Active Structures - Fourth Edition]({{< relref "preumont18_vibrat_contr_activ_struc_fourt_edition" >}}) +- [A soft 6-axis active vibration isolator]({{< relref "spanos95_soft_activ_vibrat_isolat" >}}) +- [Sensor fusion for active vibration isolation in precision equipment]({{< relref "tjepkema12_sensor_fusion_activ_vibrat_isolat_precis_equip" >}})